diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 927ecfdd41..f4157c5f6e 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -26,7 +26,6 @@ python3 -m nox --version # build Gemini docs nox -s gemini_docs - # create metadata python3 -m docuploader create-metadata \ --name="vertexai" \ @@ -36,16 +35,11 @@ python3 -m docuploader create-metadata \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) - cat docs.metadata - # upload docs python3 -m docuploader upload gemini_docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" - - # Gemini docfx yaml files nox -s gemini_docfx - # create metadata. python3 -m docuploader create-metadata \ --name="vertexai" \ @@ -56,12 +50,9 @@ python3 -m docuploader create-metadata \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) \ --stem="/vertex-ai/generative-ai/docs/reference/python" - cat docs.metadata - # upload docs python3 -m docuploader upload gemini_docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" - # build docs nox -s docs diff --git a/.kokoro/release.sh b/.kokoro/release.sh index 5f51c08e48..41cedde8ef 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -22,8 +22,16 @@ python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source / # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 -# Move into the package, build the distribution and upload. -TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1") +# Move into the `google-cloud-aiplatform` package, build the distribution and upload. 
+GCA_TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1") cd github/python-aiplatform python3 setup.py sdist bdist_wheel -twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* +twine upload --username __token__ --password "${GCA_TWINE_PASSWORD}" dist/* + +# Move into the `vertexai` package, build the distribution and upload. +VERTEXAI_TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_vertexai-pypi-token-1") +cd github/python-aiplatform/pypi/_vertex_ai_placeholder +python3 -m build +twine upload --username __token__ --password "${VERTEXAI_TWINE_PASSWORD}" dist/* + + diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index e941926f58..f1a3af8197 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,7 +23,7 @@ env_vars: { value: "github/python-aiplatform/.kokoro/release.sh" } -# Fetch PyPI password +# Fetch google-cloud-aiplatform PyPI password before_action { fetch_keystore { keystore_resource { @@ -33,6 +33,16 @@ before_action { } } +# Fetch vertexai PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "vertexai-pypi-token-1" + } + } +} + # Tokens needed to report release status back to GitHub env_vars: { key: "SECRET_MANAGER_KEYS" diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in index ec867d9fd6..58b015fc28 100644 --- a/.kokoro/requirements.in +++ b/.kokoro/requirements.in @@ -8,3 +8,4 @@ setuptools nox>=2022.11.21 # required to remove dependency on py charset-normalizer<3 click<8.1.0 +build \ No newline at end of file diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 8957e21104..752b809b79 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --generate-hashes 
requirements.in @@ -12,6 +12,10 @@ attrs==23.1.0 \ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 # via gcp-releasetool +build==1.2.1 \ + --hash=sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d \ + --hash=sha256:75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4 + # via -r requirements.in cachetools==5.3.2 \ --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \ --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1 @@ -373,6 +377,7 @@ packaging==23.2 \ --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via + # build # gcp-releasetool # nox pkginfo==1.9.6 \ @@ -438,6 +443,10 @@ pyjwt==2.8.0 \ pyperclip==1.8.2 \ --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 # via gcp-releasetool +pyproject-hooks==1.0.0 \ + --hash=sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8 \ + --hash=sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5 + # via build python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8887e45f6f..9172d96d2d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.60.0" + ".": "1.61.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 09a4b59f7f..25e69f3dbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## [1.61.0](https://github.com/googleapis/python-aiplatform/compare/v1.60.0...v1.61.0) (2024-08-05) + + +### Features + +* Add a warning message for scheduled deprecation of Coherence metric class 
([7f238fb](https://github.com/googleapis/python-aiplatform/commit/7f238fb3cebc44893b4e6959a77743cc4d96138e)) +* Add deprecation messages for all model-based metric classes ([71c0fd3](https://github.com/googleapis/python-aiplatform/commit/71c0fd397139a95b6045f898e906ce11b2e7e8ce)) +* Add support for task type (CODE_RETRIEVAL_QUERY) through get_embeddings. ([f2ce1e4](https://github.com/googleapis/python-aiplatform/commit/f2ce1e4caea9f344e39fc3232f697b1a6ea4f99a)) +* Add system_instruction to LangchainAgent template. ([c71c3dd](https://github.com/googleapis/python-aiplatform/commit/c71c3ddbfeaa577dfce683b3299d94e77d1c4895)) +* Adding Slack and Jira data connector for RAG to SDK ([d92e7c9](https://github.com/googleapis/python-aiplatform/commit/d92e7c91d280dd417d2c2a2cf5abc36592888593)) +* Allow protobuf 5.x ([ce9cd5d](https://github.com/googleapis/python-aiplatform/commit/ce9cd5def14597822c1d071e438cf63b6d4ba3ca)) +* LVM - Release `ImageGenerationModel` to GA ([718c199](https://github.com/googleapis/python-aiplatform/commit/718c1997778310b6898344b2e5a34513e7a82e5f)) +* Support "update" for reasoning engine. ([b73ef3e](https://github.com/googleapis/python-aiplatform/commit/b73ef3eaa2d88dbc8071e3a4f0c7da934683fc2a)) +* Update Rapid Evaluation Service QPS. Add a customizable evaluation service QPS parameter. 
([9ee9289](https://github.com/googleapis/python-aiplatform/commit/9ee9289fbe5face719515e453d4f81648b44e7b1)) + + +### Documentation + +* Change init sample to use vertexai ([829e0f6](https://github.com/googleapis/python-aiplatform/commit/829e0f6fd286cf2de2ac307a836305766473faef)) +* Make small fixes to file import documentation ([f7d65c3](https://github.com/googleapis/python-aiplatform/commit/f7d65c32948c54bcf3a6927639f2173b556bb310)) + ## [1.60.0](https://github.com/googleapis/python-aiplatform/compare/v1.59.0...v1.60.0) (2024-07-24) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/initializer.py b/google/cloud/aiplatform/initializer.py index 47af63aedd..b437f1f4d1 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -387,12 +387,14 @@ def get_resource_type(self) -> _Product: return self._resource_type vertex_product = os.getenv("VERTEX_PRODUCT") - if vertex_product == "COLAB_ENTERPRISE": - self._resource_type = _Product.COLAB_ENTERPRISE - if vertex_product == "WORKBENCH_CUSTOM_CONTAINER": - self._resource_type = _Product.WORKBENCH_CUSTOM_CONTAINER - if vertex_product == "WORKBENCH_INSTANCE": - self._resource_type = _Product.WORKBENCH_INSTANCE + product_mapping = { + "COLAB_ENTERPRISE": _Product.COLAB_ENTERPRISE, + "WORKBENCH_CUSTOM_CONTAINER": _Product.WORKBENCH_CUSTOM_CONTAINER, + "WORKBENCH_INSTANCE": _Product.WORKBENCH_INSTANCE, + } + + if vertex_product in product_mapping: + self._resource_type = product_mapping[vertex_product] return self._resource_type diff --git 
a/google/cloud/aiplatform/matching_engine/_protos/README b/google/cloud/aiplatform/matching_engine/_protos/README index 77520f99fa..5a31eea525 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/README +++ b/google/cloud/aiplatform/matching_engine/_protos/README @@ -4,7 +4,7 @@ Track bazel progress here: https://github.com/googleapis/python-aiplatform/issue 1 . Ensure that your environment is using python 3.10 or older which is needed for step 2. Consider using Anaconda to create envs with older python version. -2. Use pip install `grpcio-tools==1.48.2` because protobuf 3.x is still supported by this library. +2. Use `pip install grpcio-tools==1.59.0` or newer to support protobuf 5.x 3. Copy the file from `https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto` @@ -22,7 +22,7 @@ from google.cloud.aiplatform.matching_engine._protos import match_service_pb2 as ``` to ``` -google.cloud.aiplatform.matching_engine._protos import match_service_pb2 +from google.cloud.aiplatform.matching_engine._protos import match_service_pb2 ``` Also, run a find a replace to change `google_dot_cloud_dot_aiplatform_dot_matching__engine_dot___protos_dot_match__service__pb2` diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py index 8da41a0b85..77b44a8ce4 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py @@ -17,232 +17,62 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/aiplatform/matching_engine/_protos/match_service.proto """Generated protocol buffer code.""" + +from google.protobuf.internal import builder as _builder +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12$google.cloud.aiplatform.container.v1\x1a\x17google/rpc/status.proto"7\n\x0fSparseEmbedding\x12\x11\n\tfloat_val\x18\x01 \x03(\x02\x12\x11\n\tdimension\x18\x02 \x03(\x03"\xff\x04\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x0c \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x45\n\x03rrf\x18\r \x01(\x0b\x32\x36.google.cloud.aiplatform.container.v1.MatchRequest.RRFH\x00\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12\x42\n\trestricts\x18\x04 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x0b \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05\x12.\n&fraction_leaf_nodes_to_search_override\x18\t \x01(\x01\x12\x19\n\x11\x65mbedding_enabled\x18\x08 \x01(\x08\x12\x14\n\x0c\x65mbedding_id\x18\n \x01(\t\x1a\x14\n\x03RRF\x12\r\n\x05\x61lpha\x18\x01 
\x01(\x02\x42\t\n\x07ranking"\xae\x02\n\tEmbedding\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x06 \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x42\n\trestricts\x18\x03 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x05 \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12\x1a\n\x12\x63rowding_attribute\x18\x04 \x01(\x03"\x83\x02\n\rMatchResponse\x12N\n\x08neighbor\x18\x01 \x03(\x0b\x32<.google.cloud.aiplatform.container.v1.MatchResponse.Neighbor\x12\x43\n\nembeddings\x18\x02 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding\x1a]\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01\x12\x17\n\x0fsparse_distance\x18\x04 \x01(\x01\x12\x1a\n\x12\x63rowding_attribute\x18\x03 \x01(\x03"B\n\x19\x42\x61tchGetEmbeddingsRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x03(\t"a\n\x1a\x42\x61tchGetEmbeddingsResponse\x12\x43\n\nembeddings\x18\x01 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding"\x95\x02\n\x11\x42\x61tchMatchRequest\x12\x63\n\x08requests\x18\x01 \x03(\x0b\x32Q.google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9a\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.google.cloud.aiplatform.container.v1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xa2\x02\n\x12\x42\x61tchMatchResponse\x12\x66\n\tresponses\x18\x01 \x03(\x0b\x32S.google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa3\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x46\n\tresponses\x18\x02 \x03(\x0b\x32\x33.google.cloud.aiplatform.container.v1.MatchResponse\x12"\n\x06status\x18\x03 
\x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 \x03(\t"\xb4\x02\n\x10NumericNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tvalue_int\x18\x02 \x01(\x03H\x00\x12\x15\n\x0bvalue_float\x18\x03 \x01(\x02H\x00\x12\x16\n\x0cvalue_double\x18\x04 \x01(\x01H\x00\x12K\n\x02op\x18\x05 \x01(\x0e\x32?.google.cloud.aiplatform.container.v1.NumericNamespace.Operator"x\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x08\n\x04LESS\x10\x01\x12\x0e\n\nLESS_EQUAL\x10\x02\x12\t\n\x05\x45QUAL\x10\x03\x12\x11\n\rGREATER_EQUAL\x10\x04\x12\x0b\n\x07GREATER\x10\x05\x12\r\n\tNOT_EQUAL\x10\x06\x42\x07\n\x05Value2\xa2\x03\n\x0cMatchService\x12r\n\x05Match\x12\x32.google.cloud.aiplatform.container.v1.MatchRequest\x1a\x33.google.cloud.aiplatform.container.v1.MatchResponse"\x00\x12\x81\x01\n\nBatchMatch\x12\x37.google.cloud.aiplatform.container.v1.BatchMatchRequest\x1a\x38.google.cloud.aiplatform.container.v1.BatchMatchResponse"\x00\x12\x99\x01\n\x12\x42\x61tchGetEmbeddings\x12?.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest\x1a@.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse"\x00\x62\x06proto3' ) - -_SPARSEEMBEDDING = DESCRIPTOR.message_types_by_name["SparseEmbedding"] -_MATCHREQUEST = DESCRIPTOR.message_types_by_name["MatchRequest"] -_MATCHREQUEST_RRF = _MATCHREQUEST.nested_types_by_name["RRF"] -_EMBEDDING = DESCRIPTOR.message_types_by_name["Embedding"] -_MATCHRESPONSE = DESCRIPTOR.message_types_by_name["MatchResponse"] -_MATCHRESPONSE_NEIGHBOR = _MATCHRESPONSE.nested_types_by_name["Neighbor"] -_BATCHGETEMBEDDINGSREQUEST = DESCRIPTOR.message_types_by_name[ - "BatchGetEmbeddingsRequest" -] -_BATCHGETEMBEDDINGSRESPONSE = DESCRIPTOR.message_types_by_name[ - "BatchGetEmbeddingsResponse" -] -_BATCHMATCHREQUEST = DESCRIPTOR.message_types_by_name["BatchMatchRequest"] -_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX = 
_BATCHMATCHREQUEST.nested_types_by_name[ - "BatchMatchRequestPerIndex" -] -_BATCHMATCHRESPONSE = DESCRIPTOR.message_types_by_name["BatchMatchResponse"] -_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX = ( - _BATCHMATCHRESPONSE.nested_types_by_name["BatchMatchResponsePerIndex"] -) -_NAMESPACE = DESCRIPTOR.message_types_by_name["Namespace"] -_NUMERICNAMESPACE = DESCRIPTOR.message_types_by_name["NumericNamespace"] -_NUMERICNAMESPACE_OPERATOR = _NUMERICNAMESPACE.enum_types_by_name["Operator"] -SparseEmbedding = _reflection.GeneratedProtocolMessageType( - "SparseEmbedding", - (_message.Message,), - { - "DESCRIPTOR": _SPARSEEMBEDDING, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.SparseEmbedding) - }, -) -_sym_db.RegisterMessage(SparseEmbedding) - -MatchRequest = _reflection.GeneratedProtocolMessageType( - "MatchRequest", - (_message.Message,), - { - "RRF": _reflection.GeneratedProtocolMessageType( - "RRF", - (_message.Message,), - { - "DESCRIPTOR": _MATCHREQUEST_RRF, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchRequest.RRF) - }, - ), - "DESCRIPTOR": _MATCHREQUEST, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchRequest) - }, -) -_sym_db.RegisterMessage(MatchRequest) -_sym_db.RegisterMessage(MatchRequest.RRF) - -Embedding = _reflection.GeneratedProtocolMessageType( - "Embedding", - (_message.Message,), - { - "DESCRIPTOR": _EMBEDDING, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.Embedding) - }, -) -_sym_db.RegisterMessage(Embedding) - -MatchResponse = _reflection.GeneratedProtocolMessageType( - 
"MatchResponse", - (_message.Message,), - { - "Neighbor": _reflection.GeneratedProtocolMessageType( - "Neighbor", - (_message.Message,), - { - "DESCRIPTOR": _MATCHRESPONSE_NEIGHBOR, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchResponse.Neighbor) - }, - ), - "DESCRIPTOR": _MATCHRESPONSE, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchResponse) - }, +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages( + DESCRIPTOR, + "google.cloud.aiplatform.matching_engine._protos.match_service_pb2", + _globals, ) -_sym_db.RegisterMessage(MatchResponse) -_sym_db.RegisterMessage(MatchResponse.Neighbor) - -BatchGetEmbeddingsRequest = _reflection.GeneratedProtocolMessageType( - "BatchGetEmbeddingsRequest", - (_message.Message,), - { - "DESCRIPTOR": _BATCHGETEMBEDDINGSREQUEST, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest) - }, -) -_sym_db.RegisterMessage(BatchGetEmbeddingsRequest) - -BatchGetEmbeddingsResponse = _reflection.GeneratedProtocolMessageType( - "BatchGetEmbeddingsResponse", - (_message.Message,), - { - "DESCRIPTOR": _BATCHGETEMBEDDINGSRESPONSE, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse) - }, -) -_sym_db.RegisterMessage(BatchGetEmbeddingsResponse) - -BatchMatchRequest = _reflection.GeneratedProtocolMessageType( - "BatchMatchRequest", - (_message.Message,), - { - "BatchMatchRequestPerIndex": _reflection.GeneratedProtocolMessageType( - "BatchMatchRequestPerIndex", - 
(_message.Message,), - { - "DESCRIPTOR": _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex) - }, - ), - "DESCRIPTOR": _BATCHMATCHREQUEST, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchMatchRequest) - }, -) -_sym_db.RegisterMessage(BatchMatchRequest) -_sym_db.RegisterMessage(BatchMatchRequest.BatchMatchRequestPerIndex) - -BatchMatchResponse = _reflection.GeneratedProtocolMessageType( - "BatchMatchResponse", - (_message.Message,), - { - "BatchMatchResponsePerIndex": _reflection.GeneratedProtocolMessageType( - "BatchMatchResponsePerIndex", - (_message.Message,), - { - "DESCRIPTOR": _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex) - }, - ), - "DESCRIPTOR": _BATCHMATCHRESPONSE, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.BatchMatchResponse) - }, -) -_sym_db.RegisterMessage(BatchMatchResponse) -_sym_db.RegisterMessage(BatchMatchResponse.BatchMatchResponsePerIndex) - -Namespace = _reflection.GeneratedProtocolMessageType( - "Namespace", - (_message.Message,), - { - "DESCRIPTOR": _NAMESPACE, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.Namespace) - }, -) -_sym_db.RegisterMessage(Namespace) - -NumericNamespace = _reflection.GeneratedProtocolMessageType( - "NumericNamespace", - (_message.Message,), - { - "DESCRIPTOR": 
_NUMERICNAMESPACE, - "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.NumericNamespace) - }, -) -_sym_db.RegisterMessage(NumericNamespace) - -_MATCHSERVICE = DESCRIPTOR.services_by_name["MatchService"] if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _SPARSEEMBEDDING._serialized_start = 134 - _SPARSEEMBEDDING._serialized_end = 189 - _MATCHREQUEST._serialized_start = 192 - _MATCHREQUEST._serialized_end = 831 - _MATCHREQUEST_RRF._serialized_start = 800 - _MATCHREQUEST_RRF._serialized_end = 820 - _EMBEDDING._serialized_start = 834 - _EMBEDDING._serialized_end = 1136 - _MATCHRESPONSE._serialized_start = 1139 - _MATCHRESPONSE._serialized_end = 1398 - _MATCHRESPONSE_NEIGHBOR._serialized_start = 1305 - _MATCHRESPONSE_NEIGHBOR._serialized_end = 1398 - _BATCHGETEMBEDDINGSREQUEST._serialized_start = 1400 - _BATCHGETEMBEDDINGSREQUEST._serialized_end = 1466 - _BATCHGETEMBEDDINGSRESPONSE._serialized_start = 1468 - _BATCHGETEMBEDDINGSRESPONSE._serialized_end = 1565 - _BATCHMATCHREQUEST._serialized_start = 1568 - _BATCHMATCHREQUEST._serialized_end = 1845 - _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_start = 1691 - _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_end = 1845 - _BATCHMATCHRESPONSE._serialized_start = 1848 - _BATCHMATCHRESPONSE._serialized_end = 2138 - _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_start = 1975 - _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_end = 2138 - _NAMESPACE._serialized_start = 2140 - _NAMESPACE._serialized_end = 2208 - _NUMERICNAMESPACE._serialized_start = 2211 - _NUMERICNAMESPACE._serialized_end = 2519 - _NUMERICNAMESPACE_OPERATOR._serialized_start = 2390 - _NUMERICNAMESPACE_OPERATOR._serialized_end = 2510 - _MATCHSERVICE._serialized_start = 2522 - _MATCHSERVICE._serialized_end = 2940 + _globals["_SPARSEEMBEDDING"]._serialized_start = 134 + 
_globals["_SPARSEEMBEDDING"]._serialized_end = 189 + _globals["_MATCHREQUEST"]._serialized_start = 192 + _globals["_MATCHREQUEST"]._serialized_end = 831 + _globals["_MATCHREQUEST_RRF"]._serialized_start = 800 + _globals["_MATCHREQUEST_RRF"]._serialized_end = 820 + _globals["_EMBEDDING"]._serialized_start = 834 + _globals["_EMBEDDING"]._serialized_end = 1136 + _globals["_MATCHRESPONSE"]._serialized_start = 1139 + _globals["_MATCHRESPONSE"]._serialized_end = 1398 + _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_start = 1305 + _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_end = 1398 + _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_start = 1400 + _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_end = 1466 + _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_start = 1468 + _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_end = 1565 + _globals["_BATCHMATCHREQUEST"]._serialized_start = 1568 + _globals["_BATCHMATCHREQUEST"]._serialized_end = 1845 + _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_start = 1691 + _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_end = 1845 + _globals["_BATCHMATCHRESPONSE"]._serialized_start = 1848 + _globals["_BATCHMATCHRESPONSE"]._serialized_end = 2138 + _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_start = 1975 + _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_end = 2138 + _globals["_NAMESPACE"]._serialized_start = 2140 + _globals["_NAMESPACE"]._serialized_end = 2208 + _globals["_NUMERICNAMESPACE"]._serialized_start = 2211 + _globals["_NUMERICNAMESPACE"]._serialized_end = 2519 + _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_start = 2390 + _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_end = 2510 + _globals["_MATCHSERVICE"]._serialized_start = 2522 + _globals["_MATCHSERVICE"]._serialized_end = 2940 # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/aiplatform/telemetry.py b/google/cloud/aiplatform/telemetry.py index 
f0125f85d5..d3ada51ed8 100644 --- a/google/cloud/aiplatform/telemetry.py +++ b/google/cloud/aiplatform/telemetry.py @@ -59,8 +59,8 @@ def _append_tool_name(tool_name: str) -> None: def _pop_tool_name(tool_name: str) -> None: if not _tool_names_to_append or _tool_names_to_append[-1] != tool_name: - _LOGGER.warning( - "Gapic client context issue detected." + _LOGGER.debug( + "Gapic client context telemetry issue detected." + "This can occur due to parallelization." ) return diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 722a89b094..8372b02f53 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.60.0" +__version__ = "1.61.0" diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 7c6011ec3d..723a7b5aca 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -96,11 +96,14 @@ from .types.content import Content from .types.content import FileData from .types.content import GenerationConfig +from .types.content import GroundingChunk from .types.content import GroundingMetadata +from .types.content import GroundingSupport from .types.content import Part from .types.content import SafetyRating from .types.content import SafetySetting from .types.content import SearchEntryPoint +from .types.content import Segment from .types.content import VideoMetadata from .types.content import HarmCategory from .types.context import Context @@ -567,6 +570,7 @@ from .types.nas_job import NasTrialDetail from .types.network_spec import NetworkSpec from .types.notebook_euc_config import NotebookEucConfig +from .types.notebook_execution_job import NotebookExecutionJob from .types.notebook_idle_shutdown_config import NotebookIdleShutdownConfig from .types.notebook_runtime import NotebookRuntime from .types.notebook_runtime import NotebookRuntimeTemplate @@ -574,12 +578,18 @@ from .types.notebook_runtime_template_ref import NotebookRuntimeTemplateRef from .types.notebook_service import AssignNotebookRuntimeOperationMetadata from .types.notebook_service import AssignNotebookRuntimeRequest +from .types.notebook_service import CreateNotebookExecutionJobOperationMetadata +from .types.notebook_service import CreateNotebookExecutionJobRequest from .types.notebook_service import CreateNotebookRuntimeTemplateOperationMetadata from .types.notebook_service import CreateNotebookRuntimeTemplateRequest +from .types.notebook_service import DeleteNotebookExecutionJobRequest from .types.notebook_service import DeleteNotebookRuntimeRequest from .types.notebook_service import 
DeleteNotebookRuntimeTemplateRequest +from .types.notebook_service import GetNotebookExecutionJobRequest from .types.notebook_service import GetNotebookRuntimeRequest from .types.notebook_service import GetNotebookRuntimeTemplateRequest +from .types.notebook_service import ListNotebookExecutionJobsRequest +from .types.notebook_service import ListNotebookExecutionJobsResponse from .types.notebook_service import ListNotebookRuntimesRequest from .types.notebook_service import ListNotebookRuntimesResponse from .types.notebook_service import ListNotebookRuntimeTemplatesRequest @@ -591,11 +601,13 @@ from .types.notebook_service import UpgradeNotebookRuntimeOperationMetadata from .types.notebook_service import UpgradeNotebookRuntimeRequest from .types.notebook_service import UpgradeNotebookRuntimeResponse +from .types.notebook_service import NotebookExecutionJobView from .types.openapi import Schema from .types.openapi import Type from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata from .types.persistent_resource import PersistentResource +from .types.persistent_resource import RayLogsSpec from .types.persistent_resource import RayMetricSpec from .types.persistent_resource import RaySpec from .types.persistent_resource import ResourcePool @@ -935,6 +947,8 @@ "CreateMetadataStoreRequest", "CreateModelDeploymentMonitoringJobRequest", "CreateNasJobRequest", + "CreateNotebookExecutionJobOperationMetadata", + "CreateNotebookExecutionJobRequest", "CreateNotebookRuntimeTemplateOperationMetadata", "CreateNotebookRuntimeTemplateRequest", "CreatePersistentResourceOperationMetadata", @@ -992,6 +1006,7 @@ "DeleteModelRequest", "DeleteModelVersionRequest", "DeleteNasJobRequest", + "DeleteNotebookExecutionJobRequest", "DeleteNotebookRuntimeRequest", "DeleteNotebookRuntimeTemplateRequest", "DeleteOperationMetadata", @@ -1131,6 +1146,7 @@ "GetModelRequest", "GetNasJobRequest", "GetNasTrialDetailRequest", + 
"GetNotebookExecutionJobRequest", "GetNotebookRuntimeRequest", "GetNotebookRuntimeTemplateRequest", "GetPersistentResourceRequest", @@ -1147,7 +1163,9 @@ "GetTrialRequest", "GetTuningJobRequest", "GoogleSearchRetrieval", + "GroundingChunk", "GroundingMetadata", + "GroundingSupport", "HarmCategory", "HyperparameterTuningJob", "IdMatcher", @@ -1235,6 +1253,8 @@ "ListNasJobsResponse", "ListNasTrialDetailsRequest", "ListNasTrialDetailsResponse", + "ListNotebookExecutionJobsRequest", + "ListNotebookExecutionJobsResponse", "ListNotebookRuntimeTemplatesRequest", "ListNotebookRuntimeTemplatesResponse", "ListNotebookRuntimesRequest", @@ -1316,6 +1336,8 @@ "NetworkSpec", "NfsMount", "NotebookEucConfig", + "NotebookExecutionJob", + "NotebookExecutionJobView", "NotebookIdleShutdownConfig", "NotebookRuntime", "NotebookRuntimeTemplate", @@ -1366,6 +1388,7 @@ "QueryDeployedModelsResponse", "QueryExecutionInputsAndOutputsRequest", "RawPredictRequest", + "RayLogsSpec", "RayMetricSpec", "RaySpec", "ReadFeatureValuesRequest", @@ -1417,6 +1440,7 @@ "SearchModelDeploymentMonitoringStatsAnomaliesResponse", "SearchNearestEntitiesRequest", "SearchNearestEntitiesResponse", + "Segment", "ServiceAccountSpec", "ShieldedVmConfig", "SmoothGradConfig", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 1813153c40..cb891e9b06 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -3252,11 +3252,21 @@ "assign_notebook_runtime" ] }, + "CreateNotebookExecutionJob": { + "methods": [ + "create_notebook_execution_job" + ] + }, "CreateNotebookRuntimeTemplate": { "methods": [ "create_notebook_runtime_template" ] }, + "DeleteNotebookExecutionJob": { + "methods": [ + "delete_notebook_execution_job" + ] + }, "DeleteNotebookRuntime": { "methods": [ "delete_notebook_runtime" @@ -3267,6 +3277,11 @@ "delete_notebook_runtime_template" ] }, + "GetNotebookExecutionJob": { + "methods": [ + 
"get_notebook_execution_job" + ] + }, "GetNotebookRuntime": { "methods": [ "get_notebook_runtime" @@ -3277,6 +3292,11 @@ "get_notebook_runtime_template" ] }, + "ListNotebookExecutionJobs": { + "methods": [ + "list_notebook_execution_jobs" + ] + }, "ListNotebookRuntimeTemplates": { "methods": [ "list_notebook_runtime_templates" @@ -3312,11 +3332,21 @@ "assign_notebook_runtime" ] }, + "CreateNotebookExecutionJob": { + "methods": [ + "create_notebook_execution_job" + ] + }, "CreateNotebookRuntimeTemplate": { "methods": [ "create_notebook_runtime_template" ] }, + "DeleteNotebookExecutionJob": { + "methods": [ + "delete_notebook_execution_job" + ] + }, "DeleteNotebookRuntime": { "methods": [ "delete_notebook_runtime" @@ -3327,6 +3357,11 @@ "delete_notebook_runtime_template" ] }, + "GetNotebookExecutionJob": { + "methods": [ + "get_notebook_execution_job" + ] + }, "GetNotebookRuntime": { "methods": [ "get_notebook_runtime" @@ -3337,6 +3372,11 @@ "get_notebook_runtime_template" ] }, + "ListNotebookExecutionJobs": { + "methods": [ + "list_notebook_execution_jobs" + ] + }, "ListNotebookRuntimeTemplates": { "methods": [ "list_notebook_runtime_templates" @@ -3372,11 +3412,21 @@ "assign_notebook_runtime" ] }, + "CreateNotebookExecutionJob": { + "methods": [ + "create_notebook_execution_job" + ] + }, "CreateNotebookRuntimeTemplate": { "methods": [ "create_notebook_runtime_template" ] }, + "DeleteNotebookExecutionJob": { + "methods": [ + "delete_notebook_execution_job" + ] + }, "DeleteNotebookRuntime": { "methods": [ "delete_notebook_runtime" @@ -3387,6 +3437,11 @@ "delete_notebook_runtime_template" ] }, + "GetNotebookExecutionJob": { + "methods": [ + "get_notebook_execution_job" + ] + }, "GetNotebookRuntime": { "methods": [ "get_notebook_runtime" @@ -3397,6 +3452,11 @@ "get_notebook_runtime_template" ] }, + "ListNotebookExecutionJobs": { + "methods": [ + "list_notebook_execution_jobs" + ] + }, "ListNotebookRuntimeTemplates": { "methods": [ "list_notebook_runtime_templates" diff 
--git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index d7f0ac0249..886029a303 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -250,9 +250,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DatasetServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 70f5d5f8ed..236ffda044 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -692,9 +692,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DatasetServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index c91245bba7..466ebd2a56 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 725c18f4b2..48ecb65967 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index 4fd05e5d19..44b156a89a 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index aceb82a76c..e2be118dc1 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -912,9 +912,6 @@ class DatasetServiceRestTransport(DatasetServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -934,39 +931,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2892,7 +2885,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2901,11 +2894,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2986,7 +2981,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2995,11 +2990,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3081,11 +3078,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3168,11 +3167,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3255,11 +3256,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3337,7 +3340,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3346,11 +3349,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3433,11 +3438,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3519,11 +3526,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3605,11 +3614,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3689,7 +3700,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3698,11 +3709,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3785,11 +3798,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3871,11 +3886,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3957,11 +3974,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4045,11 +4064,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4133,11 +4154,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4222,11 +4245,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4308,11 +4333,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4393,7 +4420,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4402,11 +4429,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4488,7 +4517,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4497,11 +4526,13 @@ def __call__( query_params = json.loads( 
json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py index f8a56e423d..106231dc60 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -256,9 +256,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DeploymentResourcePoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py index 594394fc3c..ae39a3fa3d 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py @@ -619,9 +619,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DeploymentResourcePoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). 
We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py index 68110d42b4..c7ffbd47d2 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py index f96d1df116..ae18d98c5c 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py @@ -131,7 +131,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py index 2f7585a5a2..ffb5e07fc2 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py @@ -178,7 +178,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py index 630119f845..bbd700c8fe 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py @@ -529,9 +529,6 @@ class DeploymentResourcePoolServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -551,39 +548,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2518,7 +2511,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2527,11 +2520,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2620,11 +2615,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -2713,11 +2710,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2806,11 +2805,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2902,11 +2903,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2997,7 +3000,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3006,11 +3009,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git 
a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 9bdb448960..cc6d477249 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -246,9 +246,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the EndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index a9f0dc8e0b..8b1a3173c7 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -649,9 +649,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the EndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index fd72bf050b..fcdc32d433 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py index 9755742b3e..c1f9fab51e 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py index ad4d7b7e9d..488c241ba0 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py index d92d977bb2..f891893658 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py @@ -569,9 +569,6 @@ class EndpointServiceRestTransport(EndpointServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -591,39 +588,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2549,7 +2542,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2558,11 +2551,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2644,11 +2639,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2726,7 +2723,7 @@ 
def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2735,11 +2732,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2821,11 +2820,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2907,11 +2908,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2993,7 +2996,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3002,11 +3005,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3085,7 +3090,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3094,11 +3099,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3179,7 +3186,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3188,11 +3195,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py index 90ef0e9423..a5a31c7dca 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py @@ 
-270,9 +270,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreAdminServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -809,10 +806,11 @@ async def sample_update_feature_online_store(): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1518,7 +1516,14 @@ async def sample_update_feature_view(): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py index 9097e19f07..5ab91a3260 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py @@ -633,9 +633,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreAdminServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -1261,10 +1258,11 @@ def sample_update_feature_online_store(): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1959,7 +1957,14 @@ def sample_update_feature_view(): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py index 1b54ef636f..f900635443 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py index cf29a41abf..4a144eafbe 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py index 4220625ac0..0e5427ffd5 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py index 5365b5c2b4..0b646342d6 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py @@ -770,9 +770,6 @@ class FeatureOnlineStoreAdminServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -792,39 +789,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2761,7 +2754,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2770,11 +2763,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2859,7 +2854,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2868,11 +2863,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2961,11 +2958,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3050,11 +3049,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3143,11 +3144,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3234,11 +3237,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3326,11 +3331,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3419,11 +3426,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -3513,11 +3522,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3607,11 +3618,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3696,7 +3709,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3705,11 +3718,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3799,7 +3814,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3808,11 +3823,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3895,7 +3912,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3904,11 +3921,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py index 7341ea3433..215a3ea130 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py @@ -233,9 +233,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py index 476fdddf0f..eb2875ed70 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py @@ -560,9 +560,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py index 643faf3e6f..c390eff918 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py index e81cf57f3e..aef2622727 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py index db1ea2ec2f..9e4393bbb6 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py index c7b79a92d8..85dc73d781 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py @@ -388,9 +388,6 @@ class FeatureOnlineStoreServiceRestTransport(FeatureOnlineStoreServiceTransport) It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -410,39 +407,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -534,7 +527,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -543,11 +536,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -631,7 +626,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -640,11 +635,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py index 6dcf41be95..68ea8173c6 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py @@ -242,9 +242,6 @@ def 
__init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -744,6 +741,9 @@ async def sample_update_feature_group(): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1431,7 +1431,9 @@ async def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py index d4fbe567fa..b315a0a05f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py @@ -597,9 +597,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -1176,6 +1173,9 @@ def sample_update_feature_group(): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1848,7 +1848,9 @@ def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py index 15da7a89a9..07979905c4 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py index 98ed8e0e88..dcb1a89952 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py @@ -132,7 +132,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py index 9692da7bfd..ff1e726812 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py @@ -179,7 +179,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py index 185e22e39f..87c4bd6d8f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py @@ -643,9 +643,6 @@ class FeatureRegistryServiceRestTransport(FeatureRegistryServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -665,39 +662,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2627,7 +2620,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2636,11 +2629,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2723,7 +2718,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2732,11 +2727,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2820,11 +2817,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2907,11 +2906,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2994,11 +2995,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3080,11 +3083,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3168,11 +3173,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3258,11 +3265,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3344,7 +3353,7 @@ def __call__( # Jsonify the request body body = 
json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3353,11 +3362,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3438,7 +3449,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3447,11 +3458,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index 6211b6ccb0..b51922f862 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -241,9 +241,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreOnlineServingServiceTransport constructor. If set to None, a transport is chosen automatically. 
- NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py index ddcd7aaba1..4cae40a153 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -567,9 +567,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreOnlineServingServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py index c579e9762f..9ff90a6133 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py index e95345ca7c..dfb143d9d4 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py @@ -128,7 +128,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py index 115cd13090..bc2d49d051 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -175,7 +175,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py index 1281caa296..94ed18a65c 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py @@ -422,9 +422,6 @@ class FeaturestoreOnlineServingServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -444,39 +441,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -568,7 +561,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -577,11 +570,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -668,7 +663,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -677,11 +672,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -764,7 +761,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -773,11 +770,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index 31058f1cc2..a6fc5dd664 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -247,9 +247,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -2254,7 +2251,9 @@ async def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py index 39822417e9..6a5f9217fd 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -622,9 +622,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -2673,7 +2670,9 @@ def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py index f0da31cdcb..fcdef361c0 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py index e2aa6e1ec3..6dc7d58f0f 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py index 84252a70d7..cfb98bc3c5 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py index f3a338fb8a..a78c3bdb84 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py @@ -992,9 +992,6 @@ class FeaturestoreServiceRestTransport(FeaturestoreServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1014,39 +1011,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2974,7 +2967,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2983,11 +2976,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3068,7 +3063,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3077,11 +3072,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3164,7 +3161,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3173,11 +3170,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3260,7 +3259,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3269,11 +3268,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3356,7 +3357,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3365,11 +3366,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3453,11 +3456,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3540,11 +3545,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3627,11 +3634,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3711,7 +3720,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3720,11 +3729,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3805,7 +3816,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3814,11 +3825,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3903,11 +3916,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3992,11 +4007,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4083,11 +4100,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4169,7 +4188,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4178,11 +4197,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4265,11 +4286,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4355,11 +4378,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4443,11 +4468,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4529,11 +4556,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4618,7 +4647,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4627,11 +4656,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4714,7 +4745,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4723,11 +4754,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4810,7 +4843,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4819,11 +4852,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py index 25c09e3532..d8281e49b1 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py @@ -235,9 +235,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiTuningServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py index 1418af9dd1..2aae977135 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py @@ -630,9 +630,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiTuningServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py index 9aa1203f39..95ffcffba4 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py index 7f4d78def9..f4b812804a 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py index 3da68a1632..90b1172a20 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py index 6a8ebee144..5470a536ea 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py @@ -432,9 +432,6 @@ class GenAiTuningServiceRestTransport(GenAiTuningServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -454,39 +451,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -568,7 +561,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -577,11 +570,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -655,7 +650,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -664,11 +659,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -751,11 +748,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -839,11 +838,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index bee3bf01c5..158363ffd6 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -237,9 +237,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexEndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index 10534efc91..03f7d45248 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -588,9 +588,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexEndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index 6096f49f02..caa8199a41 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py index e37f8927fe..31ba861047 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py index 9981e9b1cb..da41fd6da4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py index c120d237a5..64ce281d93 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py @@ -583,9 +583,6 @@ class IndexEndpointServiceRestTransport(IndexEndpointServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -605,39 +602,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2565,7 +2558,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2574,11 +2567,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2662,11 +2657,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2744,7 +2741,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2753,11 +2750,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2841,11 +2840,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2929,11 +2930,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3015,7 +3018,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3024,11 +3027,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3107,7 +3112,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3116,11 +3121,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3203,7 +3210,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3212,11 +3219,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index a3c19ac0d6..68c2add4aa 100644 --- a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -231,9 +231,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py index 1157814ac7..dd5abfd2f6 100644 --- a/google/cloud/aiplatform_v1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_service/client.py @@ -585,9 +585,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_service/transports/base.py index e5a9f86971..2795badfeb 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py index 90ffc6fb01..7b1ac7e46d 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py index fd5374254b..31f222c383 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py index 59167474ee..9f75bd2907 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py @@ -535,9 +535,6 @@ class IndexServiceRestTransport(IndexServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -557,39 +554,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2515,7 +2508,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2524,11 +2517,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2610,11 +2605,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2696,11 +2693,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2782,11 +2781,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ 
-2867,7 +2868,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2876,11 +2877,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2961,7 +2964,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2970,11 +2973,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3054,7 +3059,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3063,11 +3068,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request 
headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 73001fddda..527d2fdd41 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -298,9 +298,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the JobServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 62b5ee0a03..4a15f944cb 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -921,9 +921,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the JobServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index a6a55b480d..feefdfc906 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -109,6 +109,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -121,7 +123,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index d529402fc8..e23e7cb577 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -147,7 +147,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index 500a1a6c19..0a6cb4d05a 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -194,7 +194,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py index bacbc97123..bf7ffa7db8 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py @@ -1339,9 +1339,6 @@ class JobServiceRestTransport(JobServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1361,39 +1358,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3315,7 +3308,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3324,11 +3317,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3396,7 +3391,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3405,11 +3400,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3477,7 +3474,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3486,11 +3483,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3559,7 +3558,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3568,11 +3567,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3638,7 +3639,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3647,11 +3648,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3731,7 +3734,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3740,11 +3743,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3832,7 +3837,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3841,11 +3846,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3928,7 +3935,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = 
transcoded_request["method"] @@ -3937,11 +3944,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4026,7 +4035,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4035,11 +4044,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4130,7 +4141,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4139,11 +4150,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4227,7 +4240,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], 
use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4236,11 +4249,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4327,11 +4342,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4414,11 +4431,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4501,11 +4520,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4589,11 +4610,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4682,11 +4705,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4767,11 +4792,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4858,11 +4885,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4950,11 +4979,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5039,11 +5070,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5130,11 +5163,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5225,11 +5260,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5313,11 +5350,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5403,11 +5442,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5492,11 +5533,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5580,11 +5623,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5668,11 +5713,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5757,11 +5804,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5851,11 +5900,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5937,11 +5988,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6025,11 +6078,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6110,7 +6165,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6119,11 +6174,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6197,7 +6254,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6206,11 +6263,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -6292,7 +6351,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6301,11 +6360,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6402,7 +6463,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6411,11 +6472,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py index 79f46e06a1..47fc10f734 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py @@ -223,9 +223,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the LlmUtilityServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -297,17 +294,8 @@ async def sample_count_tokens(): client = aiplatform_v1.LlmUtilityServiceAsyncClient() # Initialize request argument(s) - instances = aiplatform_v1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request @@ -328,7 +316,7 @@ async def sample_count_tokens(): on the ``request`` instance; if ``request`` is provided, this should not be set. instances (:class:`MutableSequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py index 08b1fc0902..e220df4b68 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py @@ -557,9 +557,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the LlmUtilityServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -717,17 +714,8 @@ def sample_count_tokens(): client = aiplatform_v1.LlmUtilityServiceClient() # Initialize request argument(s) - instances = aiplatform_v1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request @@ -748,7 +736,7 @@ def sample_count_tokens(): on the ``request`` instance; if ``request`` is provided, this should not be set. instances (MutableSequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py index 22a4b580f2..31972749e7 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py @@ -87,6 +87,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -99,7 +101,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py index c8554db89e..598e27b0e5 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py index 9bc3529b46..6c9acd5ba7 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py index 1f1bc2301b..f39544de09 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py @@ -383,9 +383,6 @@ class LlmUtilityServiceRestTransport(LlmUtilityServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -405,39 +402,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -528,7 +521,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -537,11 +530,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -623,7 +618,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -632,11 +627,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/match_service/async_client.py b/google/cloud/aiplatform_v1/services/match_service/async_client.py index b7e14780e9..4e3a959ede 100644 --- a/google/cloud/aiplatform_v1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/match_service/async_client.py @@ -218,9 +218,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MatchServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/match_service/client.py b/google/cloud/aiplatform_v1/services/match_service/client.py index 9c94004d68..be248a4c8c 100644 --- a/google/cloud/aiplatform_v1/services/match_service/client.py +++ b/google/cloud/aiplatform_v1/services/match_service/client.py @@ -552,9 +552,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MatchServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/base.py b/google/cloud/aiplatform_v1/services/match_service/transports/base.py index 0b06fa7a8a..1126dfeb31 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py index 8d15cadf46..f6bf902d8a 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py index b90b2f131e..5c0771620a 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py index 40ab1e353a..31bf611063 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py @@ -380,9 +380,6 @@ class MatchServiceRestTransport(MatchServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -402,39 +399,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -520,7 +513,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -529,11 +522,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -615,7 +610,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -624,11 +619,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py index 7e99d5a43c..3ed110fd2d 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -252,9 +252,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MetadataServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py index 7bce6ccfd7..173f4eed16 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -670,9 +670,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MetadataServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py index b441787d2e..862666c19c 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py @@ -97,6 +97,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -109,7 +111,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py index 4a0601e2f4..3d402c52dc 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py @@ -138,7 +138,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py index 58761e4cdc..ef4e440a4f 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py @@ -185,7 +185,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py index 344e552586..1216539d47 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py @@ -1325,9 +1325,6 @@ class MetadataServiceRestTransport(MetadataServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1347,39 +1344,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3312,7 +3305,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3321,11 +3314,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3407,7 +3402,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3416,11 +3411,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3502,7 +3499,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3511,11 +3508,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3593,7 +3592,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3602,11 +3601,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3684,7 +3685,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3693,11 +3694,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3777,7 +3780,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3786,11 +3789,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3870,7 +3875,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3879,11 +3884,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3966,7 +3973,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3975,11 +3982,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4061,11 +4070,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = 
dict(metadata) headers["Content-Type"] = "application/json" @@ -4146,11 +4157,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4233,11 +4246,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4320,11 +4335,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4402,11 +4419,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4486,11 +4505,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the 
request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4570,11 +4591,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4656,11 +4679,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4745,11 +4770,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4831,11 +4858,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4917,11 +4946,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5003,11 +5034,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5091,11 +5124,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5179,11 +5214,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5263,7 +5300,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5272,11 +5309,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -5355,7 +5394,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5364,11 +5403,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5449,7 +5490,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5458,11 +5499,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5549,11 +5592,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5639,11 +5684,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5734,11 +5781,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5819,7 +5868,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5828,11 +5877,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5910,7 +5961,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5919,11 +5970,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6001,7 
+6054,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6010,11 +6063,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6094,7 +6149,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6103,11 +6158,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index 53c16b2d6b..49cf7cba3b 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -239,9 +239,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MigrationServiceTransport constructor. If set to None, a transport is chosen automatically. 
- NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 06161e2e29..42be200245 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -216,40 +216,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -686,9 +686,6 @@ def __init__( If a 
Callable is given, it will be called with the same set of initialization arguments as used in the MigrationServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index 565f5b1bfb..336d5dc58c 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -87,6 +87,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -99,7 +101,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index d195551f40..b3bb5e98b5 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py index 9674bb0682..dbd7cb2b44 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py index ef79f5a8f4..7fd47c14ce 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py @@ -388,9 +388,6 @@ class MigrationServiceRestTransport(MigrationServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -410,39 +407,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2370,7 +2363,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2379,11 +2372,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2464,7 +2459,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2473,11 +2468,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py index 948f294445..e8eb6231b6 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py @@ -226,9 +226,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelGardenServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1/services/model_garden_service/client.py index 3892842cfd..1410315708 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/client.py @@ -552,9 +552,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelGardenServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py index 63bfe72700..97ed723be6 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py @@ -87,6 +87,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -99,7 +101,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py index 3c46ac9976..a34eab36b0 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py index 032d339161..4a1c843cee 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py index 1b8f5e7d5d..cbf98a1bd8 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py @@ -354,9 +354,6 @@ class ModelGardenServiceRestTransport(ModelGardenServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -376,39 +373,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -497,11 +490,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index dfb5473d6a..97e5ffd135 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -248,9 +248,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index ca2b879a8b..da2d0a487c 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -682,9 +682,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index 3dd91eeae8..e4c9a9d776 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index a81b4cc9fa..2f5e568f6f 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -133,7 +133,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 83db4b2428..1b9d9ce779 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -180,7 +180,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py index 64921255fb..0be898fe91 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py @@ -883,9 +883,6 @@ class ModelServiceRestTransport(ModelServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -905,39 +902,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2870,7 +2863,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2879,11 +2872,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2971,7 +2966,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2980,11 +2975,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3065,7 +3062,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3074,11 +3071,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3160,11 +3159,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3247,11 +3248,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3329,7 +3332,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3338,11 +3341,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3421,11 +3426,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3511,11 +3518,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3602,11 +3611,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3689,7 +3700,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3698,11 +3709,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3787,11 +3800,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3876,11 +3891,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3962,11 +3979,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4050,11 +4069,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4133,7 +4154,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4142,11 +4163,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4230,7 +4253,7 @@ def __call__( # Jsonify the request body body = 
json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4239,11 +4262,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4321,7 +4346,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4330,11 +4355,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4415,7 +4442,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4424,11 +4451,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" 
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py index 5d93704cd8..a4494a62c6 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py @@ -48,9 +48,14 @@ from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_execution_job +from google.cloud.aiplatform_v1.types import ( + notebook_execution_job as gca_notebook_execution_job, +) from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime @@ -61,9 +66,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport from .client import NotebookServiceClient @@ -85,6 +92,12 @@ class NotebookServiceAsyncClient: network_path = staticmethod(NotebookServiceClient.network_path) parse_network_path = 
staticmethod(NotebookServiceClient.parse_network_path) + notebook_execution_job_path = staticmethod( + NotebookServiceClient.notebook_execution_job_path + ) + parse_notebook_execution_job_path = staticmethod( + NotebookServiceClient.parse_notebook_execution_job_path + ) notebook_runtime_path = staticmethod(NotebookServiceClient.notebook_runtime_path) parse_notebook_runtime_path = staticmethod( NotebookServiceClient.parse_notebook_runtime_path @@ -95,6 +108,8 @@ class NotebookServiceAsyncClient: parse_notebook_runtime_template_path = staticmethod( NotebookServiceClient.parse_notebook_runtime_template_path ) + schedule_path = staticmethod(NotebookServiceClient.schedule_path) + parse_schedule_path = staticmethod(NotebookServiceClient.parse_schedule_path) subnetwork_path = staticmethod(NotebookServiceClient.subnetwork_path) parse_subnetwork_path = staticmethod(NotebookServiceClient.parse_subnetwork_path) common_billing_account_path = staticmethod( @@ -248,9 +263,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the NotebookServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -1752,6 +1764,522 @@ async def sample_start_notebook_runtime(): # Done; return the response. 
return response + async def create_notebook_execution_job( + self, + request: Optional[ + Union[notebook_service.CreateNotebookExecutionJobRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_execution_job: Optional[ + gca_notebook_execution_job.NotebookExecutionJob + ] = None, + notebook_execution_job_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a NotebookExecutionJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_execution_job = aiplatform_v1.NotebookExecutionJob() + notebook_execution_job.notebook_runtime_template_resource_name = "notebook_runtime_template_resource_name_value" + notebook_execution_job.gcs_output_uri = "gcs_output_uri_value" + notebook_execution_job.execution_user = "execution_user_value" + + request = aiplatform_v1.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job=notebook_execution_job, + ) + + # Make the request + operation = client.create_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateNotebookExecutionJobRequest, 
dict]]): + The request object. Request message for + [NotebookService.CreateNotebookExecutionJob] + parent (:class:`str`): + Required. The resource name of the Location to create + the NotebookExecutionJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_execution_job (:class:`google.cloud.aiplatform_v1.types.NotebookExecutionJob`): + Required. The NotebookExecutionJob to + create. + + This corresponds to the ``notebook_execution_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_execution_job_id (:class:`str`): + Optional. User specified ID for the + NotebookExecutionJob. + + This corresponds to the ``notebook_execution_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.NotebookExecutionJob` + NotebookExecutionJob represents an instance of a + notebook execution. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_execution_job, notebook_execution_job_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.CreateNotebookExecutionJobRequest): + request = notebook_service.CreateNotebookExecutionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_execution_job is not None: + request.notebook_execution_job = notebook_execution_job + if notebook_execution_job_id is not None: + request.notebook_execution_job_id = notebook_execution_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_notebook_execution_job.NotebookExecutionJob, + metadata_type=notebook_service.CreateNotebookExecutionJobOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def get_notebook_execution_job( + self, + request: Optional[ + Union[notebook_service.GetNotebookExecutionJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_execution_job.NotebookExecutionJob: + r"""Gets a NotebookExecutionJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_execution_job(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetNotebookExecutionJobRequest, dict]]): + The request object. Request message for + [NotebookService.GetNotebookExecutionJob] + name (:class:`str`): + Required. The name of the + NotebookExecutionJob resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.NotebookExecutionJob: + NotebookExecutionJob represents an + instance of a notebook execution. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.GetNotebookExecutionJobRequest): + request = notebook_service.GetNotebookExecutionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_notebook_execution_jobs( + self, + request: Optional[ + Union[notebook_service.ListNotebookExecutionJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookExecutionJobsAsyncPager: + r"""Lists NotebookExecutionJobs in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_notebook_execution_jobs(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookExecutionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_execution_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest, dict]]): + The request object. Request message for + [NotebookService.ListNotebookExecutionJobs] + parent (:class:`str`): + Required. The resource name of the Location from which + to list the NotebookExecutionJobs. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookExecutionJobsAsyncPager: + Response message for + [NotebookService.CreateNotebookExecutionJob] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.ListNotebookExecutionJobsRequest): + request = notebook_service.ListNotebookExecutionJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_notebook_execution_jobs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListNotebookExecutionJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_notebook_execution_job( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookExecutionJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a NotebookExecutionJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteNotebookExecutionJobRequest, dict]]): + The request object. Request message for + [NotebookService.DeleteNotebookExecutionJob] + name (:class:`str`): + Required. The name of the + NotebookExecutionJob resource to be + deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.DeleteNotebookExecutionJobRequest): + request = notebook_service.DeleteNotebookExecutionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/aiplatform_v1/services/notebook_service/client.py b/google/cloud/aiplatform_v1/services/notebook_service/client.py index 239849cb71..6dc1907ace 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/client.py @@ -52,9 +52,14 @@ from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_execution_job +from google.cloud.aiplatform_v1.types import ( + notebook_execution_job as gca_notebook_execution_job, +) from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime @@ -65,9 +70,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning 
import operations_pb2  # type: ignore
+from google.protobuf import duration_pb2  # type: ignore
 from google.protobuf import empty_pb2  # type: ignore
 from google.protobuf import field_mask_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
 from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO
 from .transports.grpc import NotebookServiceGrpcTransport
 from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport
@@ -221,6 +228,28 @@ def parse_network_path(path: str) -> Dict[str, str]:
         )
         return m.groupdict() if m else {}
 
+    @staticmethod
+    def notebook_execution_job_path(
+        project: str,
+        location: str,
+        notebook_execution_job: str,
+    ) -> str:
+        """Returns a fully-qualified notebook_execution_job string."""
+        return "projects/{project}/locations/{location}/notebookExecutionJobs/{notebook_execution_job}".format(
+            project=project,
+            location=location,
+            notebook_execution_job=notebook_execution_job,
+        )
+
+    @staticmethod
+    def parse_notebook_execution_job_path(path: str) -> Dict[str, str]:
+        """Parses a notebook_execution_job path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/notebookExecutionJobs/(?P<notebook_execution_job>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
     @staticmethod
     def notebook_runtime_path(
         project: str,
@@ -265,6 +294,28 @@ def parse_notebook_runtime_template_path(path: str) -> Dict[str, str]:
         )
         return m.groupdict() if m else {}
 
+    @staticmethod
+    def schedule_path(
+        project: str,
+        location: str,
+        schedule: str,
+    ) -> str:
+        """Returns a fully-qualified schedule string."""
+        return "projects/{project}/locations/{location}/schedules/{schedule}".format(
+            project=project,
+            location=location,
+            schedule=schedule,
+        )
+
+    @staticmethod
+    def parse_schedule_path(path: str) -> Dict[str, str]:
+        """Parses a schedule path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/schedules/(?P<schedule>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
     @staticmethod
     def subnetwork_path(
         project: str,
@@ -633,9 +684,6 @@ def __init__(
                 If a Callable is given, it will be called with the same set of
                 initialization arguments as used in the NotebookServiceTransport
                 constructor. If set to None, a transport is chosen automatically.
-                NOTE: "rest" transport functionality is currently in a
-                beta state (preview). We welcome your feedback via an
-                issue in this library's source repository.
             client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                 Custom options for the client.
@@ -2199,6 +2247,518 @@ def sample_start_notebook_runtime():
         # Done; return the response.
         return response
 
+    def create_notebook_execution_job(
+        self,
+        request: Optional[
+            Union[notebook_service.CreateNotebookExecutionJobRequest, dict]
+        ] = None,
+        *,
+        parent: Optional[str] = None,
+        notebook_execution_job: Optional[
+            gca_notebook_execution_job.NotebookExecutionJob
+        ] = None,
+        notebook_execution_job_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gac_operation.Operation:
+        r"""Creates a NotebookExecutionJob.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_execution_job = aiplatform_v1.NotebookExecutionJob() + notebook_execution_job.notebook_runtime_template_resource_name = "notebook_runtime_template_resource_name_value" + notebook_execution_job.gcs_output_uri = "gcs_output_uri_value" + notebook_execution_job.execution_user = "execution_user_value" + + request = aiplatform_v1.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job=notebook_execution_job, + ) + + # Make the request + operation = client.create_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateNotebookExecutionJobRequest, dict]): + The request object. Request message for + [NotebookService.CreateNotebookExecutionJob] + parent (str): + Required. The resource name of the Location to create + the NotebookExecutionJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_execution_job (google.cloud.aiplatform_v1.types.NotebookExecutionJob): + Required. The NotebookExecutionJob to + create. + + This corresponds to the ``notebook_execution_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_execution_job_id (str): + Optional. User specified ID for the + NotebookExecutionJob. 
+ + This corresponds to the ``notebook_execution_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.NotebookExecutionJob` + NotebookExecutionJob represents an instance of a + notebook execution. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_execution_job, notebook_execution_job_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.CreateNotebookExecutionJobRequest): + request = notebook_service.CreateNotebookExecutionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_execution_job is not None: + request.notebook_execution_job = notebook_execution_job + if notebook_execution_job_id is not None: + request.notebook_execution_job_id = notebook_execution_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.create_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_notebook_execution_job.NotebookExecutionJob, + metadata_type=notebook_service.CreateNotebookExecutionJobOperationMetadata, + ) + + # Done; return the response. + return response + + def get_notebook_execution_job( + self, + request: Optional[ + Union[notebook_service.GetNotebookExecutionJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_execution_job.NotebookExecutionJob: + r"""Gets a NotebookExecutionJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_execution_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetNotebookExecutionJobRequest, dict]): + The request object. Request message for + [NotebookService.GetNotebookExecutionJob] + name (str): + Required. The name of the + NotebookExecutionJob resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.NotebookExecutionJob: + NotebookExecutionJob represents an + instance of a notebook execution. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, notebook_service.GetNotebookExecutionJobRequest): + request = notebook_service.GetNotebookExecutionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_notebook_execution_jobs( + self, + request: Optional[ + Union[notebook_service.ListNotebookExecutionJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookExecutionJobsPager: + r"""Lists NotebookExecutionJobs in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_notebook_execution_jobs(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookExecutionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_execution_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest, dict]): + The request object. Request message for + [NotebookService.ListNotebookExecutionJobs] + parent (str): + Required. The resource name of the Location from which + to list the NotebookExecutionJobs. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookExecutionJobsPager: + Response message for + [NotebookService.CreateNotebookExecutionJob] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.ListNotebookExecutionJobsRequest): + request = notebook_service.ListNotebookExecutionJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_notebook_execution_jobs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotebookExecutionJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_notebook_execution_job( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookExecutionJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a NotebookExecutionJob. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteNotebookExecutionJobRequest, dict]): + The request object. Request message for + [NotebookService.DeleteNotebookExecutionJob] + name (str): + Required. The name of the + NotebookExecutionJob resource to be + deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, notebook_service.DeleteNotebookExecutionJobRequest): + request = notebook_service.DeleteNotebookExecutionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_notebook_execution_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "NotebookServiceClient": return self diff --git a/google/cloud/aiplatform_v1/services/notebook_service/pagers.py b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py index cae91c1715..fe23fb07b1 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py @@ -24,6 +24,7 @@ Iterator, ) +from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_service @@ -288,3 +289,135 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookExecutionJobsPager: + """A pager for iterating through ``list_notebook_execution_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notebook_execution_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotebookExecutionJobs`` requests and continue to iterate + through the ``notebook_execution_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., notebook_service.ListNotebookExecutionJobsResponse], + request: notebook_service.ListNotebookExecutionJobsRequest, + response: notebook_service.ListNotebookExecutionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookExecutionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[notebook_service.ListNotebookExecutionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[notebook_execution_job.NotebookExecutionJob]: + for page in self.pages: + yield from page.notebook_execution_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookExecutionJobsAsyncPager: + """A pager for iterating through ``list_notebook_execution_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notebook_execution_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotebookExecutionJobs`` requests and continue to iterate + through the ``notebook_execution_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[notebook_service.ListNotebookExecutionJobsResponse] + ], + request: notebook_service.ListNotebookExecutionJobsRequest, + response: notebook_service.ListNotebookExecutionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookExecutionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[notebook_service.ListNotebookExecutionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[notebook_execution_job.NotebookExecutionJob]: + async def async_generator(): + async for page in self.pages: + for response in page.notebook_execution_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py index 3f9ecb843c..acbbb99a75 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py +++ 
b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py @@ -27,6 +27,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_service from google.cloud.location import locations_pb2 # type: ignore @@ -88,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) @@ -188,6 +191,26 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.create_notebook_execution_job: gapic_v1.method.wrap_method( + self.create_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_execution_job: gapic_v1.method.wrap_method( + self.get_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_execution_jobs: gapic_v1.method.wrap_method( + self.list_notebook_execution_jobs, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_execution_job: gapic_v1.method.wrap_method( + self.delete_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -318,6 +341,48 @@ def start_notebook_runtime( ]: raise NotImplementedError() + @property + def create_notebook_execution_job( + self, + ) -> Callable[ + 
[notebook_service.CreateNotebookExecutionJobRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.GetNotebookExecutionJobRequest], + Union[ + notebook_execution_job.NotebookExecutionJob, + Awaitable[notebook_execution_job.NotebookExecutionJob], + ], + ]: + raise NotImplementedError() + + @property + def list_notebook_execution_jobs( + self, + ) -> Callable[ + [notebook_service.ListNotebookExecutionJobsRequest], + Union[ + notebook_service.ListNotebookExecutionJobsResponse, + Awaitable[notebook_service.ListNotebookExecutionJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookExecutionJobRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py index 6f10de8d7c..2814b0c76a 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py @@ -25,6 +25,7 @@ import grpc # type: ignore +from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_service from google.cloud.location import locations_pb2 # type: ignore @@ -130,7 +131,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None @@ -586,6 +588,124 @@ def start_notebook_runtime( ) return self._stubs["start_notebook_runtime"] + @property + def create_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.CreateNotebookExecutionJobRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create notebook execution job method over gRPC. + + Creates a NotebookExecutionJob. + + Returns: + Callable[[~.CreateNotebookExecutionJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_notebook_execution_job" not in self._stubs: + self._stubs[ + "create_notebook_execution_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", + request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_execution_job"] + + @property + def get_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.GetNotebookExecutionJobRequest], + notebook_execution_job.NotebookExecutionJob, + ]: + r"""Return a callable for the get notebook execution job method over gRPC. + + Gets a NotebookExecutionJob. + + Returns: + Callable[[~.GetNotebookExecutionJobRequest], + ~.NotebookExecutionJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notebook_execution_job" not in self._stubs: + self._stubs["get_notebook_execution_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", + request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, + response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + ) + return self._stubs["get_notebook_execution_job"] + + @property + def list_notebook_execution_jobs( + self, + ) -> Callable[ + [notebook_service.ListNotebookExecutionJobsRequest], + notebook_service.ListNotebookExecutionJobsResponse, + ]: + r"""Return a callable for the list notebook execution jobs method over gRPC. + + Lists NotebookExecutionJobs in a Location. + + Returns: + Callable[[~.ListNotebookExecutionJobsRequest], + ~.ListNotebookExecutionJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_execution_jobs" not in self._stubs: + self._stubs["list_notebook_execution_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", + request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, + response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + ) + return self._stubs["list_notebook_execution_jobs"] + + @property + def delete_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookExecutionJobRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete notebook execution job method over gRPC. + + Deletes a NotebookExecutionJob. + + Returns: + Callable[[~.DeleteNotebookExecutionJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_execution_job" not in self._stubs: + self._stubs[ + "delete_notebook_execution_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", + request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_execution_job"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py index b0dfbc9c74..808de3e2ae 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py @@ -27,6 +27,7 @@ import grpc # type: ignore from grpc.experimental import aio # type: ignore +from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_service from google.cloud.location import locations_pb2 # type: ignore @@ -177,7 +178,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None @@ -597,6 +599,126 @@ def start_notebook_runtime( ) return self._stubs["start_notebook_runtime"] + @property + def create_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.CreateNotebookExecutionJobRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create notebook execution job method over gRPC. + + Creates a NotebookExecutionJob. + + Returns: + Callable[[~.CreateNotebookExecutionJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_notebook_execution_job" not in self._stubs: + self._stubs[ + "create_notebook_execution_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", + request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_execution_job"] + + @property + def get_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.GetNotebookExecutionJobRequest], + Awaitable[notebook_execution_job.NotebookExecutionJob], + ]: + r"""Return a callable for the get notebook execution job method over gRPC. + + Gets a NotebookExecutionJob. + + Returns: + Callable[[~.GetNotebookExecutionJobRequest], + Awaitable[~.NotebookExecutionJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notebook_execution_job" not in self._stubs: + self._stubs["get_notebook_execution_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", + request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, + response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + ) + return self._stubs["get_notebook_execution_job"] + + @property + def list_notebook_execution_jobs( + self, + ) -> Callable[ + [notebook_service.ListNotebookExecutionJobsRequest], + Awaitable[notebook_service.ListNotebookExecutionJobsResponse], + ]: + r"""Return a callable for the list notebook execution jobs method over gRPC. + + Lists NotebookExecutionJobs in a Location. + + Returns: + Callable[[~.ListNotebookExecutionJobsRequest], + Awaitable[~.ListNotebookExecutionJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_execution_jobs" not in self._stubs: + self._stubs["list_notebook_execution_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", + request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, + response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + ) + return self._stubs["list_notebook_execution_jobs"] + + @property + def delete_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookExecutionJobRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete notebook execution job method over gRPC. + + Deletes a NotebookExecutionJob. 
+ + Returns: + Callable[[~.DeleteNotebookExecutionJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_execution_job" not in self._stubs: + self._stubs[ + "delete_notebook_execution_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", + request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_execution_job"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -655,6 +777,26 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.create_notebook_execution_job: gapic_v1.method_async.wrap_method( + self.create_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_execution_job: gapic_v1.method_async.wrap_method( + self.get_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_execution_jobs: gapic_v1.method_async.wrap_method( + self.list_notebook_execution_jobs, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_execution_job: gapic_v1.method_async.wrap_method( + self.delete_notebook_execution_job, + default_timeout=None, + client_info=client_info, + ), } def close(self): diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py index f21ea79346..ea74582c3a 100644 --- 
a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py @@ -43,6 +43,7 @@ OptionalRetry = Union[retries.Retry, object, None] # type: ignore +from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_service from google.longrunning import operations_pb2 # type: ignore @@ -83,6 +84,14 @@ def post_assign_notebook_runtime(self, response): logging.log(f"Received response: {response}") return response + def pre_create_notebook_execution_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_notebook_execution_job(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_notebook_runtime_template(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -91,6 +100,14 @@ def post_create_notebook_runtime_template(self, response): logging.log(f"Received response: {response}") return response + def pre_delete_notebook_execution_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_notebook_execution_job(self, response): + logging.log(f"Received response: {response}") + return response + def pre_delete_notebook_runtime(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -107,6 +124,14 @@ def post_delete_notebook_runtime_template(self, response): logging.log(f"Received response: {response}") return response + def pre_get_notebook_execution_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_notebook_execution_job(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_notebook_runtime(self, 
request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -123,6 +148,14 @@ def post_get_notebook_runtime_template(self, response): logging.log(f"Received response: {response}") return response + def pre_list_notebook_execution_jobs(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_notebook_execution_jobs(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_notebook_runtimes(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -194,6 +227,31 @@ def post_assign_notebook_runtime( """ return response + def pre_create_notebook_execution_job( + self, + request: notebook_service.CreateNotebookExecutionJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.CreateNotebookExecutionJobRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_notebook_execution_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_create_notebook_execution_job( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_notebook_execution_job + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + def pre_create_notebook_runtime_template( self, request: notebook_service.CreateNotebookRuntimeTemplateRequest, @@ -219,6 +277,31 @@ def post_create_notebook_runtime_template( """ return response + def pre_delete_notebook_execution_job( + self, + request: notebook_service.DeleteNotebookExecutionJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.DeleteNotebookExecutionJobRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_notebook_execution_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_delete_notebook_execution_job( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_notebook_execution_job + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + def pre_delete_notebook_runtime( self, request: notebook_service.DeleteNotebookRuntimeRequest, @@ -269,6 +352,31 @@ def post_delete_notebook_runtime_template( """ return response + def pre_get_notebook_execution_job( + self, + request: notebook_service.GetNotebookExecutionJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.GetNotebookExecutionJobRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_notebook_execution_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. 
+ """ + return request, metadata + + def post_get_notebook_execution_job( + self, response: notebook_execution_job.NotebookExecutionJob + ) -> notebook_execution_job.NotebookExecutionJob: + """Post-rpc interceptor for get_notebook_execution_job + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + def pre_get_notebook_runtime( self, request: notebook_service.GetNotebookRuntimeRequest, @@ -317,6 +425,31 @@ def post_get_notebook_runtime_template( """ return response + def pre_list_notebook_execution_jobs( + self, + request: notebook_service.ListNotebookExecutionJobsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.ListNotebookExecutionJobsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_notebook_execution_jobs + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_notebook_execution_jobs( + self, response: notebook_service.ListNotebookExecutionJobsResponse + ) -> notebook_service.ListNotebookExecutionJobsResponse: + """Post-rpc interceptor for list_notebook_execution_jobs + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + def pre_list_notebook_runtimes( self, request: notebook_service.ListNotebookRuntimesRequest, @@ -680,9 +813,6 @@ class NotebookServiceRestTransport(NotebookServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -702,39 +832,35 @@ def __init__( ) -> None: """Instantiate the transport. 
- NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
@@ -2662,7 +2788,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2671,11 +2797,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2698,6 +2826,103 @@ def __call__( resp = self._interceptor.post_assign_notebook_runtime(resp) return resp + class _CreateNotebookExecutionJob(NotebookServiceRestStub): + def __hash__(self): + return hash("CreateNotebookExecutionJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.CreateNotebookExecutionJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create notebook execution + job method over HTTP. + + Args: + request (~.notebook_service.CreateNotebookExecutionJobRequest): + The request object. Request message for + [NotebookService.CreateNotebookExecutionJob] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/notebookExecutionJobs", + "body": "notebook_execution_job", + }, + ] + request, metadata = self._interceptor.pre_create_notebook_execution_job( + request, metadata + ) + pb_request = notebook_service.CreateNotebookExecutionJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_notebook_execution_job(resp) + return resp + class _CreateNotebookRuntimeTemplate(NotebookServiceRestStub): def __hash__(self): return hash("CreateNotebookRuntimeTemplate") @@ -2759,7 +2984,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2768,11 +2993,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2795,6 +3022,96 @@ def __call__( resp = self._interceptor.post_create_notebook_runtime_template(resp) return resp + class _DeleteNotebookExecutionJob(NotebookServiceRestStub): + def __hash__(self): + return hash("DeleteNotebookExecutionJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.DeleteNotebookExecutionJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete notebook execution + job method over HTTP. 
+ + Args: + request (~.notebook_service.DeleteNotebookExecutionJobRequest): + The request object. Request message for + [NotebookService.DeleteNotebookExecutionJob] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_notebook_execution_job( + request, metadata + ) + pb_request = notebook_service.DeleteNotebookExecutionJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_notebook_execution_job(resp) + return resp + class _DeleteNotebookRuntime(NotebookServiceRestStub): def __hash__(self): return hash("DeleteNotebookRuntime") @@ -2856,11 +3173,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2946,11 +3265,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2972,6 +3293,97 @@ def __call__( resp = self._interceptor.post_delete_notebook_runtime_template(resp) return resp + class _GetNotebookExecutionJob(NotebookServiceRestStub): + def __hash__(self): + return hash("GetNotebookExecutionJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.GetNotebookExecutionJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_execution_job.NotebookExecutionJob: + r"""Call the get notebook 
execution + job method over HTTP. + + Args: + request (~.notebook_service.GetNotebookExecutionJobRequest): + The request object. Request message for + [NotebookService.GetNotebookExecutionJob] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_execution_job.NotebookExecutionJob: + NotebookExecutionJob represents an + instance of a notebook execution. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}", + }, + ] + request, metadata = self._interceptor.pre_get_notebook_execution_job( + request, metadata + ) + pb_request = notebook_service.GetNotebookExecutionJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_execution_job.NotebookExecutionJob() + pb_resp = notebook_execution_job.NotebookExecutionJob.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_notebook_execution_job(resp) + return resp + class _GetNotebookRuntime(NotebookServiceRestStub): def __hash__(self): return hash("GetNotebookRuntime") @@ -3034,11 +3446,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3126,11 +3540,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3154,6 +3570,97 @@ def __call__( resp = self._interceptor.post_get_notebook_runtime_template(resp) return resp + class _ListNotebookExecutionJobs(NotebookServiceRestStub): + def __hash__(self): + return hash("ListNotebookExecutionJobs") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.ListNotebookExecutionJobsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = 
(), + ) -> notebook_service.ListNotebookExecutionJobsResponse: + r"""Call the list notebook execution + jobs method over HTTP. + + Args: + request (~.notebook_service.ListNotebookExecutionJobsRequest): + The request object. Request message for + [NotebookService.ListNotebookExecutionJobs] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_service.ListNotebookExecutionJobsResponse: + Response message for + [NotebookService.ListNotebookExecutionJobs] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/notebookExecutionJobs", + }, + ] + request, metadata = self._interceptor.pre_list_notebook_execution_jobs( + request, metadata + ) + pb_request = notebook_service.ListNotebookExecutionJobsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_service.ListNotebookExecutionJobsResponse() + pb_resp = notebook_service.ListNotebookExecutionJobsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_notebook_execution_jobs(resp) + return resp + class _ListNotebookRuntimes(NotebookServiceRestStub): def __hash__(self): return hash("ListNotebookRuntimes") @@ -3214,11 +3721,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3305,11 +3814,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3391,7 +3902,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3400,11 +3911,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3492,7 +4005,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3501,11 +4014,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3588,7 +4103,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3597,11 +4112,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3634,6 +4151,16 @@ def assign_notebook_runtime( # In C++ this would require a dynamic_cast return self._AssignNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + @property + def create_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.CreateNotebookExecutionJobRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateNotebookExecutionJob(self._session, self._host, self._interceptor) # type: ignore + @property def create_notebook_runtime_template( self, @@ -3645,6 +4172,16 @@ def create_notebook_runtime_template( # In C++ this would require a dynamic_cast return self._CreateNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookExecutionJobRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteNotebookExecutionJob(self._session, self._host, self._interceptor) # type: ignore + @property def delete_notebook_runtime( self, @@ -3666,6 +4203,17 @@ def delete_notebook_runtime_template( # In C++ this would require a dynamic_cast return self._DeleteNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + @property + def get_notebook_execution_job( + self, + ) -> Callable[ + [notebook_service.GetNotebookExecutionJobRequest], + notebook_execution_job.NotebookExecutionJob, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetNotebookExecutionJob(self._session, self._host, self._interceptor) # type: ignore + @property def get_notebook_runtime( self, @@ -3687,6 +4235,17 @@ def get_notebook_runtime_template( # In C++ this would require a dynamic_cast return self._GetNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + @property + def list_notebook_execution_jobs( + self, + ) -> Callable[ + [notebook_service.ListNotebookExecutionJobsRequest], + notebook_service.ListNotebookExecutionJobsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListNotebookExecutionJobs(self._session, self._host, self._interceptor) # type: ignore + @property def list_notebook_runtimes( self, diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py index ee1eaad72d..312b809f5d 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py @@ -253,9 +253,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PersistentResourceServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py index 29982bf60e..b2bbaa0a65 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py @@ -591,9 +591,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PersistentResourceServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py index 9136c9fbf4..7ec2a51781 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py index 3b14171233..932ebaa186 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py index d9befa6eac..dce6311df6 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py index 9800c3c88f..51775c77a2 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py @@ -527,9 +527,6 @@ class PersistentResourceServiceRestTransport(PersistentResourceServiceTransport) It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -549,39 +546,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2514,7 +2507,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2523,11 +2516,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2614,11 +2609,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2705,11 +2702,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2795,11 +2794,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2886,7 +2887,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2895,11 +2896,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2985,7 +2988,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2994,11 +2997,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 1c544acba9..422d4e6917 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -258,9 +258,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the 
PipelineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index a06d5f4e4a..caa4f73d57 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -751,9 +751,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PipelineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index 10f8a4b866..7ae0826e1e 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index f4d15e6032..ec5a7bef7e 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -136,7 +136,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index ace50d52a8..cbd94e0509 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -183,7 +183,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py index 1d75041bc8..a5507dda0d 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py @@ -683,9 +683,6 @@ class PipelineServiceRestTransport(PipelineServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -705,39 +702,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2666,7 +2659,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2675,11 +2668,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2761,7 +2756,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2770,11 +2765,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2848,7 +2845,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2857,11 +2854,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2929,7 +2928,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2938,11 +2937,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3016,7 +3017,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3025,11 +3026,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3115,7 +3118,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3124,11 +3127,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3214,11 +3219,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3301,11 +3308,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3387,11 +3396,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3479,11 +3490,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3567,11 +3580,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3655,11 +3670,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index 291ed0dbd4..ac67ecd5f5 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -232,9 +232,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PredictionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 4ac3d73427..501531423c 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -585,9 +585,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PredictionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index ab29b91f13..17cfae82c6 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index 04504a821a..1b5f76579e 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py index 30d3401bcd..d7d441d2f6 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py index f6078ef04a..b9f56f63de 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py @@ -600,9 +600,6 @@ class PredictionServiceRestTransport(PredictionServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -622,39 +619,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -740,7 +733,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -749,11 +742,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -835,7 +830,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -844,11 +839,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -928,7 +925,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -937,11 +934,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1027,7 +1026,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1036,11 +1035,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1125,7 +1126,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1134,11 +1135,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1270,7 +1273,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1279,11 +1282,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1375,7 +1380,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1384,11 +1389,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1505,7 +1512,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1514,11 +1521,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1683,7 +1692,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1692,11 +1701,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py index 5a98016f9b..9d573e9fd4 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py @@ -246,9 +246,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ScheduleServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/schedule_service/client.py b/google/cloud/aiplatform_v1/services/schedule_service/client.py index bfcd868901..d49bcd201d 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/client.py @@ -701,9 +701,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ScheduleServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py index 09d40c3f6c..69ab6c17b7 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py index 2d310099e0..9eb1d6aa41 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py @@ -133,7 +133,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py index 41e0956b1c..285e7fc68d 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py @@ -180,7 +180,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py index dff67f5542..f87b0f1ad4 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py @@ -511,9 +511,6 @@ class ScheduleServiceRestTransport(ScheduleServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -533,39 +530,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2492,7 +2485,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2501,11 +2494,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2589,11 +2584,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2675,11 +2672,13 
@@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2761,11 +2760,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2838,7 +2839,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2847,11 +2848,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2917,7 +2920,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2926,11 +2929,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3006,7 +3011,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3015,11 +3020,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 9a006fe6dd..95107d2fc5 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -242,9 +242,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the SpecialistPoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index ffed428ae2..b8b8788fab 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -571,9 +571,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the SpecialistPoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py index 453eab9dae..ceac1bb08e 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py index 42095120cb..2a1376201b 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py index 10d501611e..cc2796ed5a 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py index 54e48ca943..408ea0c328 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py @@ -492,9 +492,6 @@ class SpecialistPoolServiceRestTransport(SpecialistPoolServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -514,39 +511,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2474,7 +2467,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2483,11 +2476,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2571,11 +2566,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2667,11 +2664,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2755,11 +2754,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ 
-2843,7 +2844,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2852,11 +2853,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index 8bea6e17e2..cd55f4e543 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -263,9 +263,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the TensorboardServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py index 062416ecd4..1462c2fbde 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -657,9 +657,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the TensorboardServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py index 8bf1ba1909..44790529df 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py @@ -101,6 +101,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -113,7 +115,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py index 51ac956b85..2ace27da51 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py @@ -139,7 +139,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py index f44b14b5fc..7ee58d55b6 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py @@ -186,7 +186,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py index 311ba4ea8c..6a503ac895 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py @@ -1321,9 +1321,6 @@ class TensorboardServiceRestTransport(TensorboardServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1343,39 +1340,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3305,7 +3298,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3314,11 +3307,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3406,7 +3401,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3415,11 +3410,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3516,11 +3513,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3604,7 +3603,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3613,11 +3612,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3704,7 +3705,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3713,11 +3714,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3803,7 +3806,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3812,11 +3815,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3901,7 +3906,7 @@ def 
__call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3910,11 +3915,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4000,11 +4007,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4090,11 +4099,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4177,11 +4188,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4267,11 +4280,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4356,7 +4371,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4365,11 +4380,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4458,11 +4475,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4549,11 +4568,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4639,11 +4660,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4728,11 +4751,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4819,11 +4844,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4907,11 +4934,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4995,11 +5024,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5086,11 +5117,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5175,11 +5208,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5262,11 +5297,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5353,11 +5390,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5441,11 +5480,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5529,7 +5570,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], 
use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5538,11 +5579,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5629,7 +5672,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5638,11 +5681,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5728,7 +5773,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5737,11 +5782,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5828,7 +5875,7 @@ def __call__( # Jsonify the request 
body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5837,11 +5884,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5926,7 +5975,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5935,11 +5984,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6024,7 +6075,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6033,11 +6084,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index ecc79f0d7c..b4fccfc04e 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -232,9 +232,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VizierServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index dc15e77f9a..73acfbc683 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -606,9 +606,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VizierServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py index aea0ce6ebe..f8b13ff7aa 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py index 6d5ac597f9..3b1c25222b 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -135,7 +135,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py index 3679685b7c..11e4d7ed8c 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py @@ -182,7 +182,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py index efc8934235..509dc18974 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py @@ -749,9 +749,6 @@ class VizierServiceRestTransport(VizierServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -771,39 +768,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2733,7 +2726,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2742,11 +2735,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2830,7 +2825,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2839,11 +2834,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2924,7 +2921,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2933,11 +2930,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3015,7 +3014,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3024,11 +3023,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3111,7 +3112,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3120,11 +3121,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3201,11 +3204,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3273,11 +3278,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3349,11 +3356,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3438,11 +3447,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3523,7 +3534,7 @@ def __call__( # 
Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3532,11 +3543,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3619,11 +3632,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3705,11 +3720,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3786,7 +3803,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3795,11 +3812,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3882,7 +3901,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3891,11 +3910,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3976,7 +3997,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3985,11 +4006,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 0befe0c4b6..42ee8feec0 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -39,11 +39,14 @@ Content, FileData, GenerationConfig, + GroundingChunk, GroundingMetadata, + GroundingSupport, Part, SafetyRating, SafetySetting, SearchEntryPoint, + 
Segment, VideoMetadata, HarmCategory, ) @@ -623,6 +626,9 @@ from .notebook_euc_config import ( NotebookEucConfig, ) +from .notebook_execution_job import ( + NotebookExecutionJob, +) from .notebook_idle_shutdown_config import ( NotebookIdleShutdownConfig, ) @@ -637,12 +643,18 @@ from .notebook_service import ( AssignNotebookRuntimeOperationMetadata, AssignNotebookRuntimeRequest, + CreateNotebookExecutionJobOperationMetadata, + CreateNotebookExecutionJobRequest, CreateNotebookRuntimeTemplateOperationMetadata, CreateNotebookRuntimeTemplateRequest, + DeleteNotebookExecutionJobRequest, DeleteNotebookRuntimeRequest, DeleteNotebookRuntimeTemplateRequest, + GetNotebookExecutionJobRequest, GetNotebookRuntimeRequest, GetNotebookRuntimeTemplateRequest, + ListNotebookExecutionJobsRequest, + ListNotebookExecutionJobsResponse, ListNotebookRuntimesRequest, ListNotebookRuntimesResponse, ListNotebookRuntimeTemplatesRequest, @@ -654,6 +666,7 @@ UpgradeNotebookRuntimeOperationMetadata, UpgradeNotebookRuntimeRequest, UpgradeNotebookRuntimeResponse, + NotebookExecutionJobView, ) from .openapi import ( Schema, @@ -665,6 +678,7 @@ ) from .persistent_resource import ( PersistentResource, + RayLogsSpec, RayMetricSpec, RaySpec, ResourcePool, @@ -936,11 +950,14 @@ "Content", "FileData", "GenerationConfig", + "GroundingChunk", "GroundingMetadata", + "GroundingSupport", "Part", "SafetyRating", "SafetySetting", "SearchEntryPoint", + "Segment", "VideoMetadata", "HarmCategory", "Context", @@ -1391,6 +1408,7 @@ "NasTrialDetail", "NetworkSpec", "NotebookEucConfig", + "NotebookExecutionJob", "NotebookIdleShutdownConfig", "NotebookRuntime", "NotebookRuntimeTemplate", @@ -1398,12 +1416,18 @@ "NotebookRuntimeTemplateRef", "AssignNotebookRuntimeOperationMetadata", "AssignNotebookRuntimeRequest", + "CreateNotebookExecutionJobOperationMetadata", + "CreateNotebookExecutionJobRequest", "CreateNotebookRuntimeTemplateOperationMetadata", "CreateNotebookRuntimeTemplateRequest", + 
"DeleteNotebookExecutionJobRequest", "DeleteNotebookRuntimeRequest", "DeleteNotebookRuntimeTemplateRequest", + "GetNotebookExecutionJobRequest", "GetNotebookRuntimeRequest", "GetNotebookRuntimeTemplateRequest", + "ListNotebookExecutionJobsRequest", + "ListNotebookExecutionJobsResponse", "ListNotebookRuntimesRequest", "ListNotebookRuntimesResponse", "ListNotebookRuntimeTemplatesRequest", @@ -1415,11 +1439,13 @@ "UpgradeNotebookRuntimeOperationMetadata", "UpgradeNotebookRuntimeRequest", "UpgradeNotebookRuntimeResponse", + "NotebookExecutionJobView", "Schema", "Type", "DeleteOperationMetadata", "GenericOperationMetadata", "PersistentResource", + "RayLogsSpec", "RayMetricSpec", "RaySpec", "ResourcePool", diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py index d680a6ea04..5e1a77f810 100644 --- a/google/cloud/aiplatform_v1/types/content.py +++ b/google/cloud/aiplatform_v1/types/content.py @@ -40,6 +40,9 @@ "CitationMetadata", "Citation", "Candidate", + "Segment", + "GroundingChunk", + "GroundingSupport", "GroundingMetadata", "SearchEntryPoint", }, @@ -607,6 +610,9 @@ class Candidate(proto.Message): Output only. Index of the candidate. content (google.cloud.aiplatform_v1.types.Content): Output only. Content parts of the candidate. + score (float): + Output only. Confidence score of the + candidate. finish_reason (google.cloud.aiplatform_v1.types.Candidate.FinishReason): Output only. The reason why the model stopped generating tokens. If empty, the model has not @@ -689,6 +695,10 @@ class FinishReason(proto.Enum): number=2, message="Content", ) + score: float = proto.Field( + proto.DOUBLE, + number=8, + ) finish_reason: FinishReason = proto.Field( proto.ENUM, number=3, @@ -716,6 +726,173 @@ class FinishReason(proto.Enum): ) +class Segment(proto.Message): + r"""Segment of the content. + + Attributes: + part_index (int): + Output only. The index of a Part object + within its parent Content object. 
+ start_index (int): + Output only. Start index in the given Part, + measured in bytes. Offset from the start of the + Part, inclusive, starting at zero. + end_index (int): + Output only. End index in the given Part, + measured in bytes. Offset from the start of the + Part, exclusive, starting at zero. + text (str): + Output only. The text corresponding to the + segment from the response. + """ + + part_index: int = proto.Field( + proto.INT32, + number=1, + ) + start_index: int = proto.Field( + proto.INT32, + number=2, + ) + end_index: int = proto.Field( + proto.INT32, + number=3, + ) + text: str = proto.Field( + proto.STRING, + number=4, + ) + + +class GroundingChunk(proto.Message): + r"""Grounding chunk. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + web (google.cloud.aiplatform_v1.types.GroundingChunk.Web): + Grounding chunk from the web. + + This field is a member of `oneof`_ ``chunk_type``. + retrieved_context (google.cloud.aiplatform_v1.types.GroundingChunk.RetrievedContext): + Grounding chunk from context retrieved by the + retrieval tools. + + This field is a member of `oneof`_ ``chunk_type``. + """ + + class Web(proto.Message): + r"""Chunk from the web. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + uri (str): + URI reference of the chunk. + + This field is a member of `oneof`_ ``_uri``. + title (str): + Title of the chunk. + + This field is a member of `oneof`_ ``_title``. 
+ """ + + uri: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + title: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + class RetrievedContext(proto.Message): + r"""Chunk from context retrieved by the retrieval tools. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + uri (str): + URI reference of the attribution. + + This field is a member of `oneof`_ ``_uri``. + title (str): + Title of the attribution. + + This field is a member of `oneof`_ ``_title``. + """ + + uri: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + title: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + web: Web = proto.Field( + proto.MESSAGE, + number=1, + oneof="chunk_type", + message=Web, + ) + retrieved_context: RetrievedContext = proto.Field( + proto.MESSAGE, + number=2, + oneof="chunk_type", + message=RetrievedContext, + ) + + +class GroundingSupport(proto.Message): + r"""Grounding support. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + segment (google.cloud.aiplatform_v1.types.Segment): + Segment of the content this support belongs + to. + + This field is a member of `oneof`_ ``_segment``. + grounding_chunk_indices (MutableSequence[int]): + A list of indices (into 'grounding_chunk') specifying the + citations associated with the claim. For instance [1,3,4] + means that grounding_chunk[1], grounding_chunk[3], + grounding_chunk[4] are the retrieved content attributed to + the claim. + confidence_scores (MutableSequence[float]): + Confidence score of the support references. Ranges from 0 to + 1. 1 is the most confident. This list must have the same + size as the grounding_chunk_indices. 
+ """ + + segment: "Segment" = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message="Segment", + ) + grounding_chunk_indices: MutableSequence[int] = proto.RepeatedField( + proto.INT32, + number=2, + ) + confidence_scores: MutableSequence[float] = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + class GroundingMetadata(proto.Message): r"""Metadata returned to client when grounding is enabled. @@ -730,6 +907,11 @@ class GroundingMetadata(proto.Message): following-up web searches. This field is a member of `oneof`_ ``_search_entry_point``. + grounding_chunks (MutableSequence[google.cloud.aiplatform_v1.types.GroundingChunk]): + List of supporting references retrieved from + specified grounding source. + grounding_supports (MutableSequence[google.cloud.aiplatform_v1.types.GroundingSupport]): + Optional. List of grounding support. """ web_search_queries: MutableSequence[str] = proto.RepeatedField( @@ -742,6 +924,16 @@ class GroundingMetadata(proto.Message): optional=True, message="SearchEntryPoint", ) + grounding_chunks: MutableSequence["GroundingChunk"] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message="GroundingChunk", + ) + grounding_supports: MutableSequence["GroundingSupport"] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message="GroundingSupport", + ) class SearchEntryPoint(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 2b0ce14ada..805fa504f9 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -539,12 +539,35 @@ class Scheduling(proto.Message): gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. + strategy (google.cloud.aiplatform_v1.types.Scheduling.Strategy): + Optional. This determines which type of + scheduling strategy to use. disable_retries (bool): Optional. 
Indicates if the job should retry for internal errors after the job starts running. If true, overrides ``Scheduling.restart_job_on_worker_restart`` to false. """ + + class Strategy(proto.Enum): + r"""Optional. This determines which type of scheduling strategy to use. + Right now users have two options such as ON_DEMAND which will use + regular on demand resources to schedule the job, the other is + LOW_COST which would leverage spot resources along with regular + resources to schedule the job. + + Values: + STRATEGY_UNSPECIFIED (0): + Strategy will default to ON_DEMAND. + ON_DEMAND (1): + Regular on-demand provisioning strategy. + LOW_COST (2): + Low cost by making potential use of spot + resources. + """ + STRATEGY_UNSPECIFIED = 0 + ON_DEMAND = 1 + LOW_COST = 2 + timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=1, @@ -554,6 +577,11 @@ class Scheduling(proto.Message): proto.BOOL, number=3, ) + strategy: Strategy = proto.Field( + proto.ENUM, + number=4, + enum=Strategy, + ) disable_retries: bool = proto.Field( proto.BOOL, number=5, diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index c86f3b56d4..bc194116ca 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -43,8 +43,8 @@ class Dataset(proto.Message): Attributes: name (str): - Output only. The resource name of the - Dataset. + Output only. Identifier. The resource name of + the Dataset. display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters diff --git a/google/cloud/aiplatform_v1/types/dataset_version.py b/google/cloud/aiplatform_v1/types/dataset_version.py index a588430593..e9e5438984 100644 --- a/google/cloud/aiplatform_v1/types/dataset_version.py +++ b/google/cloud/aiplatform_v1/types/dataset_version.py @@ -36,8 +36,8 @@ class DatasetVersion(proto.Message): Attributes: name (str): - Output only.
The resource name of the - DatasetVersion. + Output only. Identifier. The resource name of + the DatasetVersion. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DatasetVersion was created. diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index b62b1237fa..f77f7c7753 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -137,6 +137,23 @@ class Endpoint(proto.Message): predict_request_response_logging_config (google.cloud.aiplatform_v1.types.PredictRequestResponseLoggingConfig): Configures the request-response logging for online prediction. + dedicated_endpoint_enabled (bool): + If true, the endpoint will be exposed through a dedicated + DNS [Endpoint.dedicated_endpoint_dns]. Your request to the + dedicated DNS will be isolated from other users' traffic and + will have better performance and reliability. Note: Once you + enabled dedicated endpoint, you won't be able to send + request to the shared DNS + {region}-aiplatform.googleapis.com. The limitation will be + removed soon. + dedicated_endpoint_dns (str): + Output only. DNS of the dedicated endpoint. Will only be + populated if dedicated_endpoint_enabled is true. Format: + ``https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog``. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. 
""" name: str = proto.Field( @@ -211,6 +228,22 @@ class Endpoint(proto.Message): message="PredictRequestResponseLoggingConfig", ) ) + dedicated_endpoint_enabled: bool = proto.Field( + proto.BOOL, + number=24, + ) + dedicated_endpoint_dns: str = proto.Field( + proto.STRING, + number=25, + ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=27, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=28, + ) class DeployedModel(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py index 3e732ff1de..4f34cfe48d 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py @@ -234,10 +234,11 @@ class UpdateFeatureOnlineStoreRequest(proto.Message): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` """ feature_online_store: gca_feature_online_store.FeatureOnlineStore = proto.Field( @@ -471,7 +472,14 @@ class UpdateFeatureViewRequest(proto.Message): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` """ feature_view: gca_feature_view.FeatureView = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/feature_online_store_service.py b/google/cloud/aiplatform_v1/types/feature_online_store_service.py index 88c64fca4b..f820dc5caa 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store_service.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store_service.py @@ -255,6 +255,8 @@ class NearestNeighborQuery(proto.Message): be 
retrieved from feature view for each query. string_filters (MutableSequence[google.cloud.aiplatform_v1.types.NearestNeighborQuery.StringFilter]): Optional. The list of string filters. + numeric_filters (MutableSequence[google.cloud.aiplatform_v1.types.NearestNeighborQuery.NumericFilter]): + Optional. The list of numeric filters. per_crowding_attribute_neighbor_count (int): Optional. Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more @@ -313,6 +315,106 @@ class StringFilter(proto.Message): number=3, ) + class NumericFilter(proto.Message): + r"""Numeric filter is used to search a subset of the entities by using + boolean rules on numeric columns. For example: Database Point 0: + {name: “a” value_int: 42} {name: “b” value_float: 1.0} Database + Point 1: {name: “a” value_int: 10} {name: “b” value_float: 2.0} + Database Point 2: {name: “a” value_int: -1} {name: “b” value_float: + 3.0} Query: {name: “a” value_int: 12 operator: LESS} // Matches + Point 1, 2 {name: “b” value_float: 2.0 operator: EQUAL} // Matches + Point 1 + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value_int (int): + int value type. + + This field is a member of `oneof`_ ``Value``. + value_float (float): + float value type. + + This field is a member of `oneof`_ ``Value``. + value_double (float): + double value type. + + This field is a member of `oneof`_ ``Value``. + name (str): + Required. Column name in BigQuery that is used + as filters. + op (google.cloud.aiplatform_v1.types.NearestNeighborQuery.NumericFilter.Operator): + Optional. This MUST be specified for queries + and must NOT be specified for database points.
+ + This field is a member of `oneof`_ ``_op``. + """ + + class Operator(proto.Enum): + r"""Datapoints for which Operator is true relative to the query’s + Value field will be allowlisted. + + Values: + OPERATOR_UNSPECIFIED (0): + Unspecified operator. + LESS (1): + Entities are eligible if their value is < the + query's. + LESS_EQUAL (2): + Entities are eligible if their value is <= + the query's. + EQUAL (3): + Entities are eligible if their value is == + the query's. + GREATER_EQUAL (4): + Entities are eligible if their value is >= + the query's. + GREATER (5): + Entities are eligible if their value is > the + query's. + NOT_EQUAL (6): + Entities are eligible if their value is != + the query's. + """ + OPERATOR_UNSPECIFIED = 0 + LESS = 1 + LESS_EQUAL = 2 + EQUAL = 3 + GREATER_EQUAL = 4 + GREATER = 5 + NOT_EQUAL = 6 + + value_int: int = proto.Field( + proto.INT64, + number=2, + oneof="Value", + ) + value_float: float = proto.Field( + proto.FLOAT, + number=3, + oneof="Value", + ) + value_double: float = proto.Field( + proto.DOUBLE, + number=4, + oneof="Value", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + op: "NearestNeighborQuery.NumericFilter.Operator" = proto.Field( + proto.ENUM, + number=5, + optional=True, + enum="NearestNeighborQuery.NumericFilter.Operator", + ) + class Parameters(proto.Message): r"""Parameters that can be overrided in each query to tune query latency and recall. 
@@ -360,6 +462,11 @@ class Parameters(proto.Message): number=4, message=StringFilter, ) + numeric_filters: MutableSequence[NumericFilter] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=NumericFilter, + ) per_crowding_attribute_neighbor_count: int = proto.Field( proto.INT32, number=5, diff --git a/google/cloud/aiplatform_v1/types/feature_registry_service.py b/google/cloud/aiplatform_v1/types/feature_registry_service.py index fcdd9ebd33..2bc4968908 100644 --- a/google/cloud/aiplatform_v1/types/feature_registry_service.py +++ b/google/cloud/aiplatform_v1/types/feature_registry_service.py @@ -219,6 +219,9 @@ class UpdateFeatureGroupRequest(proto.Message): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` """ feature_group: gca_feature_group.FeatureGroup = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index 04bc965cab..db438e0d13 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -1534,8 +1534,10 @@ class UpdateFeatureRequest(proto.Message): - ``description`` - ``labels`` - - ``disable_monitoring`` (Not supported for FeatureRegistry - Feature) + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) """ feature: gca_feature.Feature = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/model_garden_service.py b/google/cloud/aiplatform_v1/types/model_garden_service.py index ed8a088c5c..0f3838a29c 100644 --- a/google/cloud/aiplatform_v1/types/model_garden_service.py +++ b/google/cloud/aiplatform_v1/types/model_garden_service.py @@ -62,11 +62,13 @@ class GetPublisherModelRequest(proto.Message): language_code (str): Optional. 
The IETF BCP-47 language code representing the language in which the publisher - model's text information should be written in - (see go/bcp47). + model's text information should be written in. view (google.cloud.aiplatform_v1.types.PublisherModelView): Optional. PublisherModel view specifying which fields to read. + is_hugging_face_model (bool): + Optional. Boolean indicates whether the + requested model is a Hugging Face model. """ name: str = proto.Field( @@ -82,6 +84,10 @@ class GetPublisherModelRequest(proto.Message): number=3, enum="PublisherModelView", ) + is_hugging_face_model: bool = proto.Field( + proto.BOOL, + number=5, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_execution_job.py b/google/cloud/aiplatform_v1/types/notebook_execution_job.py new file mode 100644 index 0000000000..ce41c9a7cf --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_execution_job.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import job_state as gca_job_state +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NotebookExecutionJob", + }, +) + + +class NotebookExecutionJob(proto.Message): + r"""NotebookExecutionJob represents an instance of a notebook + execution. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dataform_repository_source (google.cloud.aiplatform_v1.types.NotebookExecutionJob.DataformRepositorySource): + The Dataform Repository pointing to a single + file notebook repository. + + This field is a member of `oneof`_ ``notebook_source``. + gcs_notebook_source (google.cloud.aiplatform_v1.types.NotebookExecutionJob.GcsNotebookSource): + The Cloud Storage url pointing to the ipynb file. Format: + ``gs://bucket/notebook_file.ipynb`` + + This field is a member of `oneof`_ ``notebook_source``. + direct_notebook_source (google.cloud.aiplatform_v1.types.NotebookExecutionJob.DirectNotebookSource): + The contents of an input notebook file. + + This field is a member of `oneof`_ ``notebook_source``. + notebook_runtime_template_resource_name (str): + The NotebookRuntimeTemplate to source compute + configuration from. + + This field is a member of `oneof`_ ``environment_spec``. + gcs_output_uri (str): + The Cloud Storage location to upload the result to. 
Format: + ``gs://bucket-name`` + + This field is a member of `oneof`_ ``execution_sink``. + execution_user (str): + The user email to run the execution as. Only + supported by Colab runtimes. + + This field is a member of `oneof`_ ``execution_identity``. + service_account (str): + The service account to run the execution as. + + This field is a member of `oneof`_ ``execution_identity``. + name (str): + Output only. The resource name of this NotebookExecutionJob. + Format: + ``projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}`` + display_name (str): + The display name of the NotebookExecutionJob. + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + execution_timeout (google.protobuf.duration_pb2.Duration): + Max running time of the execution job in + seconds (default 86400s / 24 hrs). + schedule_resource_name (str): + Output only. The Schedule resource name if this job is + triggered by one. Format: + ``projects/{project_id}/locations/{location}/schedules/{schedule_id}`` + job_state (google.cloud.aiplatform_v1.types.JobState): + Output only. The state of the + NotebookExecutionJob. + status (google.rpc.status_pb2.Status): + Output only. Populated when the + NotebookExecutionJob is completed. When there is + an error during notebook execution, the error + details are populated. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookExecutionJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookExecutionJob was most recently updated. + labels (MutableMapping[str, str]): + The labels with user-defined metadata to + organize NotebookExecutionJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. 
+ + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + """ + + class DataformRepositorySource(proto.Message): + r"""The Dataform Repository containing the input notebook. + + Attributes: + dataform_repository_resource_name (str): + The resource name of the Dataform Repository. Format: + ``projects/{project_id}/locations/{location}/repositories/{repository_id}`` + commit_sha (str): + The commit SHA to read repository with. If + unset, the file will be read at HEAD. + """ + + dataform_repository_resource_name: str = proto.Field( + proto.STRING, + number=1, + ) + commit_sha: str = proto.Field( + proto.STRING, + number=2, + ) + + class GcsNotebookSource(proto.Message): + r"""The Cloud Storage uri for the input notebook. + + Attributes: + uri (str): + The Cloud Storage uri pointing to the ipynb file. Format: + ``gs://bucket/notebook_file.ipynb`` + generation (str): + The version of the Cloud Storage object to + read. If unset, the current version of the + object is read. See + https://cloud.google.com/storage/docs/metadata#generation-number. + """ + + uri: str = proto.Field( + proto.STRING, + number=1, + ) + generation: str = proto.Field( + proto.STRING, + number=2, + ) + + class DirectNotebookSource(proto.Message): + r"""The content of the input notebook in ipynb format. + + Attributes: + content (bytes): + The base64-encoded contents of the input + notebook file. 
+ """ + + content: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + dataform_repository_source: DataformRepositorySource = proto.Field( + proto.MESSAGE, + number=3, + oneof="notebook_source", + message=DataformRepositorySource, + ) + gcs_notebook_source: GcsNotebookSource = proto.Field( + proto.MESSAGE, + number=4, + oneof="notebook_source", + message=GcsNotebookSource, + ) + direct_notebook_source: DirectNotebookSource = proto.Field( + proto.MESSAGE, + number=17, + oneof="notebook_source", + message=DirectNotebookSource, + ) + notebook_runtime_template_resource_name: str = proto.Field( + proto.STRING, + number=14, + oneof="environment_spec", + ) + gcs_output_uri: str = proto.Field( + proto.STRING, + number=8, + oneof="execution_sink", + ) + execution_user: str = proto.Field( + proto.STRING, + number=9, + oneof="execution_identity", + ) + service_account: str = proto.Field( + proto.STRING, + number=18, + oneof="execution_identity", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + execution_timeout: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + schedule_resource_name: str = proto.Field( + proto.STRING, + number=6, + ) + job_state: gca_job_state.JobState = proto.Field( + proto.ENUM, + number=10, + enum=gca_job_state.JobState, + ) + status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=19, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_service.py 
b/google/cloud/aiplatform_v1/types/notebook_service.py index 2dcc78a245..f7249e26be 100644 --- a/google/cloud/aiplatform_v1/types/notebook_service.py +++ b/google/cloud/aiplatform_v1/types/notebook_service.py @@ -19,6 +19,9 @@ import proto # type: ignore +from google.cloud.aiplatform_v1.types import ( + notebook_execution_job as gca_notebook_execution_job, +) from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -27,6 +30,7 @@ __protobuf__ = proto.module( package="google.cloud.aiplatform.v1", manifest={ + "NotebookExecutionJobView", "CreateNotebookRuntimeTemplateRequest", "CreateNotebookRuntimeTemplateOperationMetadata", "GetNotebookRuntimeTemplateRequest", @@ -46,10 +50,34 @@ "StartNotebookRuntimeRequest", "StartNotebookRuntimeOperationMetadata", "StartNotebookRuntimeResponse", + "CreateNotebookExecutionJobRequest", + "CreateNotebookExecutionJobOperationMetadata", + "GetNotebookExecutionJobRequest", + "ListNotebookExecutionJobsRequest", + "ListNotebookExecutionJobsResponse", + "DeleteNotebookExecutionJobRequest", }, ) +class NotebookExecutionJobView(proto.Enum): + r"""Views for Get/List NotebookExecutionJob + + Values: + NOTEBOOK_EXECUTION_JOB_VIEW_UNSPECIFIED (0): + When unspecified, the API defaults to the + BASIC view. + NOTEBOOK_EXECUTION_JOB_VIEW_BASIC (1): + Includes all fields except for direct + notebook inputs. + NOTEBOOK_EXECUTION_JOB_VIEW_FULL (2): + Includes all fields. + """ + NOTEBOOK_EXECUTION_JOB_VIEW_UNSPECIFIED = 0 + NOTEBOOK_EXECUTION_JOB_VIEW_BASIC = 1 + NOTEBOOK_EXECUTION_JOB_VIEW_FULL = 2 + + class CreateNotebookRuntimeTemplateRequest(proto.Message): r"""Request message for [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. 
@@ -623,4 +651,203 @@ class StartNotebookRuntimeResponse(proto.Message): """ +class CreateNotebookExecutionJobRequest(proto.Message): + r"""Request message for [NotebookService.CreateNotebookExecutionJob] + + Attributes: + parent (str): + Required. The resource name of the Location to create the + NotebookExecutionJob. Format: + ``projects/{project}/locations/{location}`` + notebook_execution_job (google.cloud.aiplatform_v1.types.NotebookExecutionJob): + Required. The NotebookExecutionJob to create. + notebook_execution_job_id (str): + Optional. User specified ID for the + NotebookExecutionJob. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + notebook_execution_job: gca_notebook_execution_job.NotebookExecutionJob = ( + proto.Field( + proto.MESSAGE, + number=2, + message=gca_notebook_execution_job.NotebookExecutionJob, + ) + ) + notebook_execution_job_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateNotebookExecutionJobOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.CreateNotebookExecutionJob][google.cloud.aiplatform.v1.NotebookService.CreateNotebookExecutionJob]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetNotebookExecutionJobRequest(proto.Message): + r"""Request message for [NotebookService.GetNotebookExecutionJob] + + Attributes: + name (str): + Required. The name of the + NotebookExecutionJob resource. + view (google.cloud.aiplatform_v1.types.NotebookExecutionJobView): + Optional. The NotebookExecutionJob view. + Defaults to BASIC. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + view: "NotebookExecutionJobView" = proto.Field( + proto.ENUM, + number=6, + enum="NotebookExecutionJobView", + ) + + +class ListNotebookExecutionJobsRequest(proto.Message): + r"""Request message for [NotebookService.ListNotebookExecutionJobs] + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the NotebookExecutionJobs. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``notebookExecutionJob`` supports = and !=. + ``notebookExecutionJob`` represents the + NotebookExecutionJob ID. + - ``displayName`` supports = and != and regex. + - ``schedule`` supports = and != and regex. + + Some examples: + + - ``notebookExecutionJob="123"`` + - ``notebookExecutionJob="my-execution-job"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via [ListNotebookExecutionJobs.next_page_token][] of the + previous + [NotebookService.ListNotebookExecutionJobs][google.cloud.aiplatform.v1.NotebookService.ListNotebookExecutionJobs] + call. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. + view (google.cloud.aiplatform_v1.types.NotebookExecutionJobView): + Optional. The NotebookExecutionJob view. + Defaults to BASIC. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + view: "NotebookExecutionJobView" = proto.Field( + proto.ENUM, + number=6, + enum="NotebookExecutionJobView", + ) + + +class ListNotebookExecutionJobsResponse(proto.Message): + r"""Response message for [NotebookService.CreateNotebookExecutionJob] + + Attributes: + notebook_execution_jobs (MutableSequence[google.cloud.aiplatform_v1.types.NotebookExecutionJob]): + List of NotebookExecutionJobs in the + requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListNotebookExecutionJobs.page_token][] to obtain that + page. + """ + + @property + def raw_page(self): + return self + + notebook_execution_jobs: MutableSequence[ + gca_notebook_execution_job.NotebookExecutionJob + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_notebook_execution_job.NotebookExecutionJob, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteNotebookExecutionJobRequest(proto.Message): + r"""Request message for [NotebookService.DeleteNotebookExecutionJob] + + Attributes: + name (str): + Required. The name of the + NotebookExecutionJob resource to be deleted. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/persistent_resource.py b/google/cloud/aiplatform_v1/types/persistent_resource.py index 6267420fa9..278a06d0a1 100644 --- a/google/cloud/aiplatform_v1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1/types/persistent_resource.py @@ -35,6 +35,7 @@ "ResourceRuntime", "ServiceAccountSpec", "RayMetricSpec", + "RayLogsSpec", }, ) @@ -389,6 +390,8 @@ class RaySpec(proto.Message): head node by default if this field isn't set. ray_metric_spec (google.cloud.aiplatform_v1.types.RayMetricSpec): Optional. Ray metrics configurations. + ray_logs_spec (google.cloud.aiplatform_v1.types.RayLogsSpec): + Optional. OSS Ray logging configurations. """ image_uri: str = proto.Field( @@ -409,6 +412,11 @@ class RaySpec(proto.Message): number=8, message="RayMetricSpec", ) + ray_logs_spec: "RayLogsSpec" = proto.Field( + proto.MESSAGE, + number=10, + message="RayLogsSpec", + ) class ResourceRuntime(proto.Message): @@ -481,4 +489,19 @@ class RayMetricSpec(proto.Message): ) +class RayLogsSpec(proto.Message): + r"""Configuration for the Ray OSS Logs. + + Attributes: + disabled (bool): + Optional. Flag to disable the export of Ray + OSS logs to Cloud Logging. + """ + + disabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index f7cf835b25..be63fe9d54 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -678,21 +678,37 @@ class ExplainResponse(proto.Message): class CountTokensRequest(proto.Message): r"""Request message for [PredictionService.CountTokens][]. + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: endpoint (str): Required. The name of the Endpoint requested to perform token counting. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` model (str): - Required. The name of the publisher model requested to serve + Optional. The name of the publisher model requested to serve the prediction. Format: ``projects/{project}/locations/{location}/publishers/*/models/*`` instances (MutableSequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. contents (MutableSequence[google.cloud.aiplatform_v1.types.Content]): - Required. Input content. + Optional. Input content. + system_instruction (google.cloud.aiplatform_v1.types.Content): + Optional. The user provided system + instructions for the model. Note: only text + should be used in parts and content in each part + will be in a separate paragraph. + + This field is a member of `oneof`_ ``_system_instruction``. + tools (MutableSequence[google.cloud.aiplatform_v1.types.Tool]): + Optional. A list of ``Tools`` the model may use to generate + the next response. + + A ``Tool`` is a piece of code that enables the system to + interact with external systems to perform an action, or set + of actions, outside of knowledge and scope of the model. 
""" endpoint: str = proto.Field( @@ -713,6 +729,17 @@ class CountTokensRequest(proto.Message): number=4, message=content.Content, ) + system_instruction: content.Content = proto.Field( + proto.MESSAGE, + number=5, + optional=True, + message=content.Content, + ) + tools: MutableSequence[tool.Tool] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=tool.Tool, + ) class CountTokensResponse(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index bfbaa48b92..042ad147ad 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -292,7 +292,7 @@ class Parameter(proto.Message): class TrialContext(proto.Message): - r"""Next ID: 3 + r""" Attributes: description (str): diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 0da95b569b..208a2235be 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -214,10 +214,8 @@ class Retrieval(proto.Message): This field is a member of `oneof`_ ``source``. disable_attribution (bool): - Optional. Disable using the result from this - tool in detecting grounding attribution. This - does not affect how the result is given to the - model for generation. + Optional. Deprecated. This option is no + longer supported. """ vertex_ai_search: "VertexAISearch" = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/tuning_job.py b/google/cloud/aiplatform_v1/types/tuning_job.py index ad6caefb26..b76de4901e 100644 --- a/google/cloud/aiplatform_v1/types/tuning_job.py +++ b/google/cloud/aiplatform_v1/types/tuning_job.py @@ -228,6 +228,9 @@ class SupervisedTuningDatasetDistribution(proto.Message): sum (int): Output only. Sum of a given population of values. + billable_sum (int): + Output only. Sum of a given population of + values that are billable. min_ (float): Output only. The minimum of the population values. 
@@ -280,6 +283,10 @@ class DatasetBucket(proto.Message): proto.INT64, number=1, ) + billable_sum: int = proto.Field( + proto.INT64, + number=9, + ) min_: float = proto.Field( proto.DOUBLE, number=2, @@ -324,6 +331,9 @@ class SupervisedTuningDataStats(proto.Message): total_billable_character_count (int): Output only. Number of billable characters in the tuning dataset. + total_billable_token_count (int): + Output only. Number of billable tokens in the + tuning dataset. tuning_step_count (int): Output only. Number of tuning steps for this Tuning Job. @@ -353,6 +363,10 @@ class SupervisedTuningDataStats(proto.Message): proto.INT64, number=3, ) + total_billable_token_count: int = proto.Field( + proto.INT64, + number=9, + ) tuning_step_count: int = proto.Field( proto.INT64, number=4, diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 25baaa6cb5..28554ac2a3 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -108,6 +108,7 @@ from .types.accelerator_type import AcceleratorType from .types.annotation import Annotation from .types.annotation_spec import AnnotationSpec +from .types.api_auth import ApiAuth from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.cached_content import CachedContent @@ -522,6 +523,8 @@ from .types.io import GcsDestination from .types.io import GcsSource from .types.io import GoogleDriveSource +from .types.io import JiraSource +from .types.io import SlackSource from .types.io import TFRecordDestination from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest @@ -789,6 +792,7 @@ from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata from .types.persistent_resource import PersistentResource +from .types.persistent_resource import RayLogsSpec from .types.persistent_resource 
import RayMetricSpec from .types.persistent_resource import RaySpec from .types.persistent_resource import ResourcePool @@ -840,6 +844,9 @@ from .types.prediction_service import ExplainResponse from .types.prediction_service import GenerateContentRequest from .types.prediction_service import GenerateContentResponse +from .types.prediction_service import GenerateVideoResponse +from .types.prediction_service import PredictLongRunningMetadata +from .types.prediction_service import PredictLongRunningResponse from .types.prediction_service import PredictRequest from .types.prediction_service import PredictResponse from .types.prediction_service import RawPredictRequest @@ -877,6 +884,7 @@ from .types.schedule_service import UpdateScheduleRequest from .types.service_networking import PrivateServiceConnectConfig from .types.service_networking import PscAutomatedEndpoints +from .types.service_networking import PscInterfaceConfig from .types.specialist_pool import SpecialistPool from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata from .types.specialist_pool_service import CreateSpecialistPoolRequest @@ -966,6 +974,11 @@ from .types.training_pipeline import StratifiedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline +from .types.tuning_job import DatasetDistribution +from .types.tuning_job import DatasetStats +from .types.tuning_job import DistillationDataStats +from .types.tuning_job import DistillationHyperParameters +from .types.tuning_job import DistillationSpec from .types.tuning_job import SupervisedHyperParameters from .types.tuning_job import SupervisedTuningDatasetDistribution from .types.tuning_job import SupervisedTuningDataStats @@ -1076,6 +1089,7 @@ "AddTrialMeasurementRequest", "Annotation", "AnnotationSpec", + "ApiAuth", "Artifact", "AssignNotebookRuntimeOperationMetadata", "AssignNotebookRuntimeRequest", @@ -1226,7 +1240,9 @@ "DataItemView", "DataLabelingJob", 
"Dataset", + "DatasetDistribution", "DatasetServiceClient", + "DatasetStats", "DatasetVersion", "DedicatedResources", "DeleteArtifactRequest", @@ -1300,6 +1316,9 @@ "DirectRawPredictResponse", "DirectUploadSource", "DiskSpec", + "DistillationDataStats", + "DistillationHyperParameters", + "DistillationSpec", "DoubleArray", "EncryptionSpec", "Endpoint", @@ -1397,6 +1416,7 @@ "GenAiTuningServiceClient", "GenerateContentRequest", "GenerateContentResponse", + "GenerateVideoResponse", "GenerationConfig", "GenericOperationMetadata", "GenieSource", @@ -1490,6 +1510,7 @@ "InputDataConfig", "Int64Array", "IntegratedGradientsAttribution", + "JiraSource", "JobServiceClient", "JobState", "LargeModelReference", @@ -1705,6 +1726,8 @@ "PipelineTemplateMetadata", "Port", "PredefinedSplit", + "PredictLongRunningMetadata", + "PredictLongRunningResponse", "PredictRequest", "PredictRequestResponseLoggingConfig", "PredictResponse", @@ -1715,6 +1738,7 @@ "PrivateServiceConnectConfig", "Probe", "PscAutomatedEndpoints", + "PscInterfaceConfig", "PublisherModel", "PublisherModelView", "PurgeArtifactsMetadata", @@ -1759,6 +1783,7 @@ "RagFileChunkingConfig", "RagQuery", "RawPredictRequest", + "RayLogsSpec", "RayMetricSpec", "RaySpec", "ReadFeatureValuesRequest", @@ -1834,6 +1859,7 @@ "Segment", "ServiceAccountSpec", "ShieldedVmConfig", + "SlackSource", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index d27f49c06c..2d6a83d76e 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.60.0" # {x-release-please-version} +__version__ = "1.61.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index d6fd8ec83f..021b74969d 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -250,9 +250,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DatasetServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index 7c81d0810b..64e4ec3d9b 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -692,9 +692,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DatasetServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 628905029d..83c5481201 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 18934cb685..f0004d336d 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index 7a3f1ec776..f8981a881a 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py index eb8b690287..39cbeaa6b4 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py @@ -912,9 +912,6 @@ class DatasetServiceRestTransport(DatasetServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -934,39 +931,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3108,7 +3101,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3117,11 +3110,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3202,7 +3197,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3211,11 +3206,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3297,11 +3294,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3384,11 +3383,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3471,11 +3472,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3553,7 +3556,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3562,11 +3565,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3649,11 +3654,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3735,11 +3742,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3821,11 +3830,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3905,7 +3916,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3914,11 +3925,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4001,11 +4014,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4087,11 +4102,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4173,11 +4190,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4261,11 +4280,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4349,11 +4370,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4438,11 +4461,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4524,11 +4549,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4609,7 +4636,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4618,11 +4645,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4704,7 +4733,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4713,11 +4742,13 @@ def __call__( query_params = json.loads( 
json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py index dd1abf9b0c..338e7736be 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py @@ -258,9 +258,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DeploymentResourcePoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py index d1ffbcdedf..3f9ca5953e 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py @@ -621,9 +621,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the DeploymentResourcePoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). 
We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py index a2792f8a83..dd5aabd702 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py index 8deca93d06..f59a6fc287 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py @@ -131,7 +131,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py index 498e5fe493..f35922fc08 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py @@ -178,7 +178,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py index ca85282ea6..4f00953606 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py @@ -529,9 +529,6 @@ class DeploymentResourcePoolServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -551,39 +548,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2734,7 +2727,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2743,11 +2736,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2836,11 +2831,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -2929,11 +2926,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3022,11 +3021,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3118,11 +3119,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3213,7 +3216,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3222,11 +3225,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git 
a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 28a29497e5..da74fc673f 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -246,9 +246,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the EndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index c1df1e76b0..adbc691cbd 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -649,9 +649,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the EndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index f33ecf8c45..8e696c11a7 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index 7febbd993d..b3a957e508 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py index 33b088f277..f78058edf3 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py index da5a333b13..339714e3d9 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py @@ -569,9 +569,6 @@ class EndpointServiceRestTransport(EndpointServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -591,39 +588,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2765,7 +2758,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2774,11 +2767,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2860,11 +2855,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2942,7 +2939,7 @@ 
def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2951,11 +2948,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3037,11 +3036,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3123,11 +3124,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3209,7 +3212,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3218,11 +3221,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3301,7 +3306,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3310,11 +3315,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3395,7 +3402,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3404,11 +3411,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py index 912f2086f1..dc119dbf08 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py @@ -219,9 +219,6 @@ def __init__( If a 
Callable is given, it will be called with the same set of initialization arguments as used in the EvaluationServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py index 15b3835333..576be30f2b 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py @@ -533,9 +533,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the EvaluationServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py index f29ab695a8..ee57ab5295 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py index 9706d14174..95f8c7e6db 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py index 62446757f9..2d116b336c 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py index 5402276e46..3e50571edf 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py @@ -351,9 +351,6 @@ class EvaluationServiceRestTransport(EvaluationServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -373,39 +370,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -493,7 +486,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -502,11 +495,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py index 1196e0c767..e04f349924 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py @@ -239,9 +239,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ExtensionExecutionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py index e9b084532d..acb3e4149b 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py @@ -580,9 +580,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ExtensionExecutionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py index 964e8091a9..595f058eac 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py index a6b53a247a..50cf5924f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py index f062063cea..aac9de74c3 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py index 04e041e8fa..6d9e397741 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py @@ -386,9 +386,6 @@ class ExtensionExecutionServiceRestTransport(ExtensionExecutionServiceTransport) It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -408,39 +405,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -528,7 +521,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -537,11 +530,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -621,7 +616,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -630,11 +625,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py index 8c442b726b..a3eb3c4c23 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py +++ 
b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py @@ -248,9 +248,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ExtensionRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py index f133345d28..c9b17ad38c 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py @@ -613,9 +613,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ExtensionRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py index 14a05bb861..488f8b5eed 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. 
self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py index c0a668ec1f..66d89bddf3 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py index 029555b6ce..1f0467dc00 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py index 6de9414494..6cedad2a49 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py @@ -486,9 +486,6 @@ class ExtensionRegistryServiceRestTransport(ExtensionRegistryServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -508,39 +505,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2687,11 +2680,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2772,11 +2767,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2858,7 +2855,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2867,11 +2864,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2952,11 +2951,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3040,7 +3041,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3049,11 +3050,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py index f1e8656031..87e702095d 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py @@ -270,9 +270,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreAdminServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -809,10 +806,11 @@ async def sample_update_feature_online_store(): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1518,7 +1516,14 @@ async def sample_update_feature_view(): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py index 1d42182b00..0ccf5d784d 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py @@ -633,9 +633,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreAdminServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -1261,10 +1258,11 @@ def sample_update_feature_online_store(): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1959,7 +1957,14 @@ def sample_update_feature_view(): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py index da33ba9337..73362028f3 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py index 10ae9380f8..3dae1e8e1f 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py index 6ac8e900d1..507f589390 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py index 75848c0bb5..ccd00acb92 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py @@ -770,9 +770,6 @@ class FeatureOnlineStoreAdminServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -792,39 +789,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2977,7 +2970,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2986,11 +2979,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3075,7 +3070,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3084,11 +3079,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3177,11 +3174,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3266,11 +3265,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3359,11 +3360,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3450,11 +3453,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3542,11 +3547,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3635,11 +3642,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -3729,11 +3738,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3823,11 +3834,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3912,7 +3925,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3921,11 +3934,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4015,7 +4030,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4024,11 +4039,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4111,7 +4128,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4120,11 +4137,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py index b457b168eb..d876da3b67 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py @@ -237,9 +237,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py index 28d8332855..8222538642 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py @@ -563,9 +563,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureOnlineStoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py index 2832b48897..9b867797a4 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py index 14ed444649..8f30c9b586 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py index d4d1177eaa..52bccfaa0d 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py index 8711c71d73..a674f03fe8 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py @@ -388,9 +388,6 @@ class FeatureOnlineStoreServiceRestTransport(FeatureOnlineStoreServiceTransport) It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -410,39 +407,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -534,7 +527,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -543,11 +536,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -631,7 +626,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -640,11 +635,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py index 08ee762639..511a4efafe 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py @@ 
-244,9 +244,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -746,6 +743,9 @@ async def sample_update_feature_group(): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1433,7 +1433,9 @@ async def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py index 9c8db18f18..63d0d404e7 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py @@ -599,9 +599,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeatureRegistryServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -1178,6 +1175,9 @@ def sample_update_feature_group(): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1850,7 +1850,9 @@ def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/base.py index ab8ec4a45f..6ebbc54bad 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc.py index c5ba30658a..c3450e86cd 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc.py @@ -132,7 +132,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc_asyncio.py index 56f18c1a2b..4b8e1811d6 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/grpc_asyncio.py @@ -179,7 +179,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py index 46ea9cf151..cf5f5e6337 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py @@ -643,9 +643,6 @@ class FeatureRegistryServiceRestTransport(FeatureRegistryServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -665,39 +662,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2843,7 +2836,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2852,11 +2845,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2939,7 +2934,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2948,11 +2943,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3036,11 +3033,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3123,11 +3122,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3210,11 +3211,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3296,11 +3299,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3384,11 +3389,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3474,11 +3481,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -3560,7 +3569,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3569,11 +3578,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3654,7 +3665,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3663,11 +3674,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py index 7af487ac54..79a03c63d4 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -241,9 +241,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the 
FeaturestoreOnlineServingServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py index 875e30a4c0..227aec7a07 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -567,9 +567,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreOnlineServingServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py index 14b5e34326..a161015fc2 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py index aa437239a9..d15f134842 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py @@ -128,7 +128,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py index 125a1f3129..a1d6dcdbe7 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -175,7 +175,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py index 8747e39783..9dc7596459 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py @@ -422,9 +422,6 @@ class FeaturestoreOnlineServingServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -444,39 +441,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -568,7 +561,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -577,11 +570,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -668,7 +663,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -677,11 +672,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -764,7 +761,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -773,11 +770,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index cdf6d852c5..a3dacd250f 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -248,9 +248,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -2255,7 +2252,9 @@ async def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index c0d3d706cc..b6257dc5a8 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -623,9 +623,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the FeaturestoreServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
@@ -2674,7 +2671,9 @@ def sample_update_feature(): - ``description`` - ``labels`` - ``disable_monitoring`` (Not supported for - FeatureRegistry Feature) + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py index 34f2d80fb7..a4e72ffe90 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py index 9fd42824a3..4e9cf9ae62 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py index b5fac80296..b61c8bde3f 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py index 0488f7f28b..5fd9b149f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py @@ -992,9 +992,6 @@ class FeaturestoreServiceRestTransport(FeaturestoreServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1014,39 +1011,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3190,7 +3183,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3199,11 +3192,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3284,7 +3279,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3293,11 +3288,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3380,7 +3377,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3389,11 +3386,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3476,7 +3475,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3485,11 +3484,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3572,7 +3573,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3581,11 +3582,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3669,11 +3672,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3756,11 +3761,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3843,11 +3850,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3927,7 +3936,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3936,11 +3945,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4021,7 +4032,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4030,11 +4041,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4119,11 +4132,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4208,11 +4223,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4299,11 +4316,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4385,7 +4404,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4394,11 +4413,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4481,11 +4502,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4571,11 +4594,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4659,11 +4684,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4745,11 +4772,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4834,7 +4863,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4843,11 +4872,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4930,7 +4961,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4939,11 +4970,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5026,7 +5059,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5035,11 +5068,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py index ead0ddfc58..084086794d 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py @@ -233,9 +233,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiCacheServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py index ef9a3510fc..3168996e6f 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py @@ -585,9 +585,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiCacheServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py index 553b3648d6..61651da782 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py index 15d9a8dc5f..186ae95cdc 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py index 2775f57f6d..a2b9bdc1b3 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py index cf63362920..f064b90db2 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py @@ -471,9 +471,6 @@ class GenAiCacheServiceRestTransport(GenAiCacheServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -493,39 +490,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -614,7 +607,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -623,11 +616,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -706,11 +701,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -787,11 +784,13 @@ def 
__call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -874,11 +873,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -963,7 +964,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -972,11 +973,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py index 6a14f8694c..21e7236e3a 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py @@ -79,6 +79,10 @@ class GenAiTuningServiceAsyncClient: parse_endpoint_path = staticmethod(GenAiTuningServiceClient.parse_endpoint_path) model_path = 
staticmethod(GenAiTuningServiceClient.model_path) parse_model_path = staticmethod(GenAiTuningServiceClient.parse_model_path) + pipeline_job_path = staticmethod(GenAiTuningServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod( + GenAiTuningServiceClient.parse_pipeline_job_path + ) tuning_job_path = staticmethod(GenAiTuningServiceClient.tuning_job_path) parse_tuning_job_path = staticmethod(GenAiTuningServiceClient.parse_tuning_job_path) common_billing_account_path = staticmethod( @@ -235,9 +239,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiTuningServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py index 04c21b78e1..6e8d0c3803 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py @@ -260,6 +260,28 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def pipeline_job_path( + project: str, + location: str, + pipeline_job: str, + ) -> str: + """Returns a fully-qualified pipeline_job string.""" + return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, + location=location, + pipeline_job=pipeline_job, + ) + + @staticmethod + def parse_pipeline_job_path(path: str) -> Dict[str, str]: + """Parses a pipeline_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/pipelineJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def tuning_job_path( project: str, @@ -630,9 +652,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the GenAiTuningServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py index fe1215688b..2f8cf4b841 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py index 89d3004e8e..c3b3e8e9ff 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py index 937129327d..20401ad6ac 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py index 633ff6f118..3942e3f73a 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py @@ -432,9 +432,6 @@ class GenAiTuningServiceRestTransport(GenAiTuningServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -454,39 +451,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -568,7 +561,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -577,11 +570,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -655,7 +650,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -664,11 +659,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -751,11 +748,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -839,11 +838,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 18cadbd1ca..627acaa655 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -237,9 +237,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexEndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index f3e602ff42..d7003433e4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -588,9 +588,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexEndpointServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index 561a310a0e..2c2106d4ba 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index 9607c4d526..d37bc9fab4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index 4799a1e121..af27d49780 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py index 3d188f0cc2..fba08e6015 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py @@ -583,9 +583,6 @@ class IndexEndpointServiceRestTransport(IndexEndpointServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -605,39 +602,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2781,7 +2774,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2790,11 +2783,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2878,11 +2873,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2960,7 +2957,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2969,11 +2966,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3057,11 +3056,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3145,11 +3146,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3231,7 +3234,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3240,11 +3243,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3323,7 +3328,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3332,11 +3337,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3419,7 +3426,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3428,11 +3435,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py index 389f3807f1..a27de9bca4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -231,9 +231,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py index cf715bdc91..418df013ea 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -585,9 +585,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the IndexServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py index 32bf51e2e0..8489dfae21 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py index 33d16a6b7b..f8da5027ed 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py index 4c4d28974a..f9f213333b 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py index b347df3a85..0e7712eb00 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py @@ -535,9 +535,6 @@ class IndexServiceRestTransport(IndexServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -557,39 +554,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2731,7 +2724,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2740,11 +2733,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2826,11 +2821,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2912,11 +2909,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2998,11 +2997,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ 
-3083,7 +3084,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3092,11 +3093,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3177,7 +3180,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3186,11 +3189,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3270,7 +3275,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3279,11 +3284,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request 
headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 758b4446a7..2d3b7402ce 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -300,9 +300,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the JobServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index 8f1fd51d84..f15e57ca85 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -923,9 +923,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the JobServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 147990a0c7..5458d78148 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -111,6 +111,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -123,7 +125,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index c07769d96c..8b2a1d07af 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -149,7 +149,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 793f7b3e9f..1fb7c6ebed 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -196,7 +196,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py index e40a54de38..57408dd8ed 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py @@ -1341,9 +1341,6 @@ class JobServiceRestTransport(JobServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1363,39 +1360,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3533,7 +3526,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3542,11 +3535,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3614,7 +3609,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3623,11 +3618,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3695,7 +3692,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3704,11 +3701,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3777,7 +3776,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3786,11 +3785,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3856,7 +3857,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3865,11 +3866,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3949,7 +3952,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3958,11 +3961,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4050,7 +4055,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4059,11 +4064,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4146,7 +4153,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = 
transcoded_request["method"] @@ -4155,11 +4162,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4244,7 +4253,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4253,11 +4262,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4348,7 +4359,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4357,11 +4368,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4445,7 +4458,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], 
use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4454,11 +4467,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4545,11 +4560,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4632,11 +4649,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4719,11 +4738,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4807,11 +4828,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4900,11 +4923,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4985,11 +5010,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5076,11 +5103,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5168,11 +5197,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5257,11 +5288,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5348,11 +5381,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5443,11 +5478,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5531,11 +5568,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5621,11 +5660,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5710,11 +5751,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5798,11 +5841,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5886,11 +5931,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5975,11 +6022,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6069,11 +6118,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6155,11 +6206,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6243,11 +6296,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6328,7 +6383,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6337,11 +6392,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6415,7 +6472,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6424,11 +6481,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -6510,7 +6569,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6519,11 +6578,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6620,7 +6681,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6629,11 +6690,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py index 36243d1718..db991c7a09 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py @@ -222,9 +222,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the LlmUtilityServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py index 633ffaa04b..7b0128f410 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py @@ -556,9 +556,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the LlmUtilityServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/base.py index 38a6a29c55..8e9bdccac8 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc.py index af76babd19..a8646d5733 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc_asyncio.py index fcf96c4fd5..bbf8989873 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py index 4ae07a330a..48037054d7 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py @@ -351,9 +351,6 @@ class LlmUtilityServiceRestTransport(LlmUtilityServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -373,39 +370,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -496,7 +489,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -505,11 +498,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py index f1611c504a..e8ecef8708 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py @@ -218,9 +218,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MatchServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/client.py b/google/cloud/aiplatform_v1beta1/services/match_service/client.py index 1966d98a0c..7879f422ef 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/client.py @@ -552,9 +552,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MatchServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/base.py index 0ed83be2ad..2b709a1350 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc.py index a07cbdc9b2..8187c52031 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc_asyncio.py index bc7bc5ec4a..006c600cad 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py index b70be57035..e3b7c6c756 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py @@ -380,9 +380,6 @@ class MatchServiceRestTransport(MatchServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -402,39 +399,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -520,7 +513,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -529,11 +522,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -615,7 +610,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -624,11 +619,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index 4cd66f1f6c..84204a5992 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -252,9 +252,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MetadataServiceTransport 
constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py index cea276b1ae..bcf6bc6deb 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -670,9 +670,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MetadataServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py index 443920d807..18bf9a5615 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -97,6 +97,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -109,7 +111,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py index b8087a632c..bbdddb7365 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -138,7 +138,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py index 758a1c99ca..02758b119b 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -185,7 +185,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py index ef3690d91c..a272d650be 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py @@ -1325,9 +1325,6 @@ class MetadataServiceRestTransport(MetadataServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1347,39 +1344,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3528,7 +3521,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3537,11 +3530,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3623,7 +3618,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3632,11 +3627,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3718,7 +3715,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3727,11 +3724,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3809,7 +3808,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3818,11 +3817,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3900,7 +3901,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3909,11 +3910,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3993,7 +3996,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4002,11 +4005,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4086,7 +4091,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4095,11 +4100,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4182,7 +4189,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4191,11 +4198,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4277,11 +4286,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = 
dict(metadata) headers["Content-Type"] = "application/json" @@ -4362,11 +4373,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4449,11 +4462,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4536,11 +4551,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4618,11 +4635,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4702,11 +4721,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the 
request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4786,11 +4807,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4872,11 +4895,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4961,11 +4986,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5047,11 +5074,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5133,11 +5162,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5219,11 +5250,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5307,11 +5340,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5395,11 +5430,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5479,7 +5516,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5488,11 +5525,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -5571,7 +5610,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5580,11 +5619,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5665,7 +5706,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5674,11 +5715,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5765,11 +5808,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5855,11 +5900,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5950,11 +5997,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6035,7 +6084,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6044,11 +6093,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6126,7 +6177,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6135,11 +6186,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6217,7 
+6270,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6226,11 +6279,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6310,7 +6365,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6319,11 +6374,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index e7d98f8078..a7552e626e 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -239,9 +239,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MigrationServiceTransport constructor. If set to None, a transport is chosen automatically. 
- NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index b54b12bcba..007273fbb2 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -216,40 +216,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -686,9 +686,6 
@@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the MigrationServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py index e9ef4b93d6..b967f86b99 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -87,6 +87,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -99,7 +101,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index 22b894d9ff..a4936debbf 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. 
- credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py index ca009de5aa..83d5e1e0d2 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py index 3a8b14120c..0d6aed274e 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py @@ -388,9 +388,6 @@ class MigrationServiceRestTransport(MigrationServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -410,39 +407,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2586,7 +2579,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2595,11 +2588,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2680,7 +2675,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2689,11 +2684,13 @@ def __call__( query_params = json.loads( 
json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py index e3bd80325d..95e6fedb59 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py @@ -227,9 +227,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelGardenServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py index 4c5ca703d4..c712ce578d 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py @@ -553,9 +553,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelGardenServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/base.py index 3c31aa2f84..781cce057e 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/base.py @@ -87,6 +87,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -99,7 +101,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc.py index 45ac378c31..7f192e82a4 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc_asyncio.py index 07bfaa0492..72eab99367 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py index e3eda9c3cc..e4c8e67b58 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py @@ -387,9 +387,6 @@ class ModelGardenServiceRestTransport(ModelGardenServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -409,39 +406,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -530,11 +523,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -618,11 +613,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py index b2a87345a6..c6e7bb54d5 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py +++ 
b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py @@ -267,9 +267,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelMonitoringServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py index d099ab2711..3bf2401174 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py @@ -712,9 +712,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelMonitoringServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py index eee3826b14..5426e8000c 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py @@ -92,6 +92,8 @@ def __init__( # Save the scopes. 
self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -104,7 +106,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py index 3cd7493f5f..bf2a6f4d2d 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py @@ -135,7 +135,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py index 413052ae38..df9b3d64ea 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py @@ -182,7 +182,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py index 58941067e3..b3ff20dacf 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py @@ -696,9 +696,6 @@ class ModelMonitoringServiceRestTransport(ModelMonitoringServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -718,39 +715,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2894,7 +2887,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2903,11 +2896,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2991,7 +2986,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3000,11 +2995,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3090,11 +3087,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" @@ -3180,11 +3179,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3271,11 +3272,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3362,11 +3365,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3453,11 +3458,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3541,11 +3548,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -3629,7 +3638,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3638,11 +3647,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3729,7 +3740,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3738,11 +3749,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3829,7 +3842,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3838,11 +3851,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index c65ce3aa59..25f3665fe3 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -248,9 +248,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 55bd9724e5..73e8054285 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -662,9 +662,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ModelServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index f68bbdade4..4b62063e23 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -94,6 +94,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -106,7 +108,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index b2d85c7dd1..5db61ff3e6 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -135,7 +135,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index f0f9011b60..162ad12830 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -182,7 +182,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py index 2f95412d07..c89d996148 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py @@ -885,9 +885,6 @@ class ModelServiceRestTransport(ModelServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -907,39 +904,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3088,7 +3081,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3097,11 +3090,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3189,7 +3184,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3198,11 +3193,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3283,7 +3280,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3292,11 +3289,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3378,11 +3377,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3465,11 +3466,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3547,7 +3550,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = 
transcoded_request["uri"] method = transcoded_request["method"] @@ -3556,11 +3559,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3639,11 +3644,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3729,11 +3736,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3820,11 +3829,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3907,7 +3918,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3916,11 +3927,13 @@ def __call__( 
query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4005,11 +4018,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4094,11 +4109,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4180,11 +4197,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4268,11 +4287,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4351,7 +4372,7 @@ 
def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4360,11 +4381,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4448,7 +4471,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4457,11 +4480,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4539,7 +4564,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4548,11 +4573,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -4633,7 +4660,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4642,11 +4669,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py index b0cd94ed52..1800db5caf 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py @@ -265,9 +265,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the NotebookServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py index 01abce0fb5..68f7414072 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py @@ -686,9 +686,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the NotebookServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py index e3926381e5..562d50b65c 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py index e7fa85b19c..66fd153dee 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py @@ -131,7 +131,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py index 514481ce1b..a0b7620014 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py @@ -178,7 +178,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py index d000d33772..bdb1de52f0 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py @@ -813,9 +813,6 @@ class NotebookServiceRestTransport(NotebookServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -835,39 +832,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3011,7 +3004,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3020,11 +3013,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3106,7 +3101,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3115,11 +3110,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3203,7 +3200,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3212,11 +3209,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3301,11 +3300,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3388,11 +3389,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3478,11 +3481,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3565,11 +3570,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3655,11 +3662,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3747,11 +3756,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3836,11 +3847,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3924,11 +3937,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4015,11 +4030,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4101,7 +4118,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], 
use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4110,11 +4127,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4202,7 +4221,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4211,11 +4230,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4298,7 +4319,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4307,11 +4328,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git 
a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py index 0001ec8de5..054360a275 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py @@ -54,6 +54,7 @@ persistent_resource as gca_persistent_resource, ) from google.cloud.aiplatform_v1beta1.types import persistent_resource_service +from google.cloud.aiplatform_v1beta1.types import service_networking from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -87,6 +88,12 @@ class PersistentResourceServiceAsyncClient: parse_network_path = staticmethod( PersistentResourceServiceClient.parse_network_path ) + network_attachment_path = staticmethod( + PersistentResourceServiceClient.network_attachment_path + ) + parse_network_attachment_path = staticmethod( + PersistentResourceServiceClient.parse_network_attachment_path + ) notebook_runtime_template_path = staticmethod( PersistentResourceServiceClient.notebook_runtime_template_path ) @@ -259,9 +266,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PersistentResourceServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py index b781b6fe76..90b8ee19c1 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py @@ -58,6 +58,7 @@ persistent_resource as gca_persistent_resource, ) from google.cloud.aiplatform_v1beta1.types import persistent_resource_service +from google.cloud.aiplatform_v1beta1.types import service_networking from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -219,6 +220,28 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_attachment_path( + project: str, + region: str, + networkattachment: str, + ) -> str: + """Returns a fully-qualified network_attachment string.""" + return "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format( + project=project, + region=region, + networkattachment=networkattachment, + ) + + @staticmethod + def parse_network_attachment_path(path: str) -> Dict[str, str]: + """Parses a network_attachment path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/networkAttachments/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def notebook_runtime_template_path( project: str, @@ -613,9 +636,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PersistentResourceServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py index eb89f44908..ca8bcf5857 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py index c063fcb2c0..0a9daf3ce6 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py @@ -130,7 +130,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py index 3b0496a0b1..9519521a07 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py @@ -177,7 +177,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py index 233d6fb00d..7a6bff4806 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py @@ -527,9 +527,6 @@ class PersistentResourceServiceRestTransport(PersistentResourceServiceTransport) It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -549,39 +546,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2730,7 +2723,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2739,11 +2732,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2830,11 +2825,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -2921,11 +2918,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3011,11 +3010,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3102,7 +3103,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3111,11 +3112,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3201,7 +3204,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3210,11 +3213,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index c42543dc55..1b88c1eb48 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -260,9 +260,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PipelineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 61a70dd872..4d20c66776 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -753,9 +753,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PipelineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 206adb5c16..e6335b762d 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -94,6 +94,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -106,7 +108,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 77781369c8..f8727fccde 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -138,7 +138,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index cb16a4f555..d9d22c76b9 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -185,7 +185,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py index aad6b80c66..796249a959 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py @@ -685,9 +685,6 @@ class PipelineServiceRestTransport(PipelineServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -707,39 +704,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2884,7 +2877,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2893,11 +2886,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2979,7 +2974,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2988,11 +2983,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3066,7 +3063,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3075,11 +3072,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3147,7 +3146,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3156,11 +3155,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3234,7 +3235,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3243,11 +3244,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3333,7 +3336,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3342,11 +3345,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3432,11 +3437,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3519,11 +3526,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3605,11 +3614,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3697,11 +3708,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3785,11 +3798,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3873,11 +3888,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 38a7c07902..ed48998937 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -238,9 +238,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the 
PredictionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -1619,17 +1616,8 @@ async def sample_count_tokens(): client = aiplatform_v1beta1.PredictionServiceAsyncClient() # Initialize request argument(s) - instances = aiplatform_v1beta1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1beta1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request @@ -1651,7 +1639,7 @@ async def sample_count_tokens(): on the ``request`` instance; if ``request`` is provided, this should not be set. instances (:class:`MutableSequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index d434835e36..24db012105 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -629,9 +629,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the PredictionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. 
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -2074,17 +2071,8 @@ def sample_count_tokens(): client = aiplatform_v1beta1.PredictionServiceClient() # Initialize request argument(s) - instances = aiplatform_v1beta1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1beta1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request @@ -2106,7 +2094,7 @@ def sample_count_tokens(): on the ``request`` instance; if ``request`` is provided, this should not be set. instances (MutableSequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index c36586d211..c619b1b4d1 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 8f0995834b..dd5b70a293 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -127,7 +127,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index d7aec5472c..a230b0f116 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -174,7 +174,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py index 6c3f9d5f42..33c179b05f 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py @@ -662,9 +662,6 @@ class PredictionServiceRestTransport(PredictionServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -684,39 +681,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -850,7 +843,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -859,11 +852,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -945,7 +940,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -954,11 +949,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1038,7 +1035,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1047,11 +1044,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1133,7 +1132,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1142,11 +1141,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1226,7 +1227,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1235,11 +1236,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1325,7 +1328,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1334,11 +1337,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1423,7 +1428,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1432,11 +1437,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1568,7 +1575,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1577,11 +1584,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1673,7 +1682,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1682,11 +1691,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1803,7 +1814,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1812,11 +1823,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -1981,7 +1994,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1990,11 +2003,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py index d46dc9d9a6..e689bef131 100644 --- 
a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py @@ -238,9 +238,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ReasoningEngineExecutionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py index bc9b7fd1fa..c52e0d143d 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py @@ -565,9 +565,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ReasoningEngineExecutionServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py index 5e77b1ffeb..f19927047d 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py index 7bb838ae18..6c3a8a29fe 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py @@ -128,7 +128,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py index 1c7b740102..135a29eb26 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py @@ -175,7 +175,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py index 98a07b2e9b..6f412b3856 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py @@ -356,9 +356,6 @@ class ReasoningEngineExecutionServiceRestTransport( It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -378,39 +375,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -504,7 +497,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -513,11 +506,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py index eea1a30ea4..3b776f2451 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py +++ 
b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py @@ -239,9 +239,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ReasoningEngineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py index bca54794f1..cb50b27320 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py @@ -568,9 +568,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ReasoningEngineServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/base.py index 30af0baf14..007b41c117 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. 
self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc.py index 05994be372..02ac186843 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc_asyncio.py index e758c3a571..f524bb7182 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py index 8906d50c32..1a0e93d681 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py @@ -487,9 +487,6 @@ class ReasoningEngineServiceRestTransport(ReasoningEngineServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -509,39 +506,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2687,7 +2680,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2696,11 +2689,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2786,11 +2781,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2874,11 +2871,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2964,11 +2963,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + 
query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3054,7 +3055,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3063,11 +3064,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py index a1b06fea58..542ad106bc 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py @@ -280,9 +280,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ScheduleServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py index e93ad36701..dd9c6cb990 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py @@ -859,9 +859,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the ScheduleServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/base.py index 2e281f19b7..8183b103ae 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc.py index abd279d319..03d01c7b1c 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc.py @@ -133,7 +133,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc_asyncio.py index cf1d29a7fb..a10bdc62db 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/grpc_asyncio.py @@ -180,7 +180,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py index dbef129414..c5a841ca5d 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py @@ -511,9 +511,6 @@ class ScheduleServiceRestTransport(ScheduleServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -533,39 +530,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2708,7 +2701,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2717,11 +2710,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2805,11 +2800,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2891,11 +2888,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2977,11 +2976,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ 
-3054,7 +3055,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3063,11 +3064,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3133,7 +3136,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3142,11 +3145,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3222,7 +3227,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3231,11 +3236,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request 
headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 8c84c00523..83a445a311 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -242,9 +242,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the SpecialistPoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index 0925ab6e75..06b0f507a1 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -571,9 +571,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the SpecialistPoolServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index 02f5aa333e..68e1254c5d 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py index 47322c234b..5eaa5c3cfb 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py index f75843c83f..0e8cff16b8 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py index 992d135512..aca0b39851 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py @@ -492,9 +492,6 @@ class SpecialistPoolServiceRestTransport(SpecialistPoolServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -514,39 +511,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2690,7 +2683,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2699,11 +2692,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2787,11 +2782,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -2883,11 +2880,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2971,11 +2970,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3059,7 +3060,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3068,11 +3069,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 9f506e1e87..e598b869f0 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -263,9 +263,6 @@ def __init__( If a Callable is given, it will be called with the 
same set of initialization arguments as used in the TensorboardServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 48553ea191..32fe9b9bf5 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -657,9 +657,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the TensorboardServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py index ec0c8303f5..a3f69e94e1 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -98,6 +98,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -110,7 +112,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py index c96ec38a2a..863017958b 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -139,7 +139,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py index 6b1b064629..46d02b435a 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -186,7 +186,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py index 8b8d1417e3..8c0997b526 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py @@ -1321,9 +1321,6 @@ class TensorboardServiceRestTransport(TensorboardServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -1343,39 +1340,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -3521,7 +3514,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3530,11 +3523,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3622,7 +3617,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3631,11 +3626,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3732,11 +3729,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3820,7 +3819,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3829,11 +3828,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3920,7 +3921,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3929,11 +3930,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4019,7 +4022,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4028,11 +4031,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4117,7 +4122,7 @@ def 
__call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4126,11 +4131,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4216,11 +4223,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4306,11 +4315,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4393,11 +4404,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4483,11 +4496,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], 
- use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4572,7 +4587,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4581,11 +4596,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4674,11 +4691,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4765,11 +4784,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4855,11 +4876,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4944,11 +4967,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5035,11 +5060,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5123,11 +5150,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5211,11 +5240,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5302,11 +5333,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + 
use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5391,11 +5424,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5478,11 +5513,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5569,11 +5606,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5657,11 +5696,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5745,7 +5786,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], 
use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5754,11 +5795,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5845,7 +5888,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5854,11 +5897,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -5944,7 +5989,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -5953,11 +5998,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6044,7 +6091,7 @@ def __call__( # Jsonify the request 
body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6053,11 +6100,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6142,7 +6191,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6151,11 +6200,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -6240,7 +6291,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -6249,11 +6300,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = 
"application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py index fda970b069..b1d6c1a9b1 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py @@ -85,6 +85,10 @@ class VertexRagDataServiceAsyncClient: ) rag_file_path = staticmethod(VertexRagDataServiceClient.rag_file_path) parse_rag_file_path = staticmethod(VertexRagDataServiceClient.parse_rag_file_path) + secret_version_path = staticmethod(VertexRagDataServiceClient.secret_version_path) + parse_secret_version_path = staticmethod( + VertexRagDataServiceClient.parse_secret_version_path + ) common_billing_account_path = staticmethod( VertexRagDataServiceClient.common_billing_account_path ) @@ -239,9 +243,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VertexRagDataServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py index 9a44d8dfd8..950e2b4116 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py @@ -284,6 +284,28 @@ def parse_rag_file_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def secret_version_path( + project: str, + secret: str, + secret_version: str, + ) -> str: + """Returns a fully-qualified secret_version string.""" + return "projects/{project}/secrets/{secret}/versions/{secret_version}".format( + project=project, + secret=secret, + secret_version=secret_version, + ) + + @staticmethod + def parse_secret_version_path(path: str) -> Dict[str, str]: + """Parses a secret_version path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/secrets/(?P.+?)/versions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path( billing_account: str, @@ -632,9 +654,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VertexRagDataServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py index 796c4fa57f..c96aaec4ab 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py @@ -88,6 +88,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -100,7 +102,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py index 11b9e7599d..adddd14811 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py @@ -129,7 +129,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py index 157ff346a7..13d74aca8a 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py @@ -176,7 +176,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py index f1485a7361..f623649e32 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py @@ -609,9 +609,6 @@ class VertexRagDataServiceRestTransport(VertexRagDataServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -631,39 +628,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2807,7 +2800,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2816,11 +2809,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -2904,11 +2899,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) 
headers["Content-Type"] = "application/json" @@ -2989,11 +2986,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3074,11 +3073,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3160,11 +3161,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3246,7 +3249,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3255,11 +3258,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3342,11 +3347,13 @@ def __call__( query_params = 
json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3428,11 +3435,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3511,7 +3520,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3520,11 +3529,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py index bc7fe08de0..3775170579 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py @@ -219,9 +219,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VertexRagServiceTransport constructor. 
If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py index b09149b6c7..a7b19c74de 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py @@ -553,9 +553,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VertexRagServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py index 154482f3ee..a1cff6b391 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py @@ -86,6 +86,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -98,7 +100,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py index a15cad9583..64854819ac 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py @@ -126,7 +126,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py index 9a05eb711a..b61c4374e5 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py @@ -173,7 +173,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py index 63a96211b0..3707604ebf 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py @@ -351,9 +351,6 @@ class VertexRagServiceRestTransport(VertexRagServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -373,39 +370,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -493,7 +486,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -502,11 +495,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index 1c0f39a8ab..2665192714 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -232,9 +232,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VizierServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 2e6b4f720a..f7eeb79c46 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -606,9 +606,6 @@ def __init__( If a Callable is given, it will be called with the same set of initialization arguments as used in the VizierServiceTransport constructor. If set to None, a transport is chosen automatically. - NOTE: "rest" transport functionality is currently in a - beta state (preview). We welcome your feedback via an - issue in this library's source repository. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py index 0c716c6428..9d6a13ce7a 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py @@ -90,6 +90,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -102,7 +104,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index 983c01ea84..802e1eb230 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -135,7 +135,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index 0ec33f049d..989b8ae419 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -182,7 +182,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py index 4bdca85e92..03f92f50d4 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py @@ -749,9 +749,6 @@ class VizierServiceRestTransport(VizierServiceTransport): It sends JSON representations of protocol buffers over HTTP/1.1 - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via an issue in this - library's source repository. Thank you! """ def __init__( @@ -771,39 +768,35 @@ def __init__( ) -> None: """Instantiate the transport. - NOTE: This REST transport functionality is currently in a beta - state (preview). We welcome your feedback via a GitHub issue in - this library's repository. Thank you! - - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
""" # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. @@ -2949,7 +2942,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2958,11 +2951,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3046,7 +3041,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3055,11 +3050,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3140,7 +3137,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3149,11 +3146,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) 
query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3231,7 +3230,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3240,11 +3239,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3327,7 +3328,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3336,11 +3337,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3417,11 +3420,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ 
-3489,11 +3494,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3565,11 +3572,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3654,11 +3663,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3739,7 +3750,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3748,11 +3759,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3835,11 +3848,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( 
transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -3921,11 +3936,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4002,7 +4019,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4011,11 +4028,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4098,7 +4117,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4107,11 +4126,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = 
"json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" @@ -4192,7 +4213,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=False + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -4201,11 +4222,13 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - use_integers_for_enums=False, + use_integers_for_enums=True, ) ) query_params.update(self._get_unset_required_fields(query_params)) + query_params["$alt"] = "json;enum-encoding=int" + # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index f2a342f232..2ed65d01fe 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -22,6 +22,9 @@ from .annotation_spec import ( AnnotationSpec, ) +from .api_auth import ( + ApiAuth, +) from .artifact import ( Artifact, ) @@ -531,6 +534,8 @@ GcsDestination, GcsSource, GoogleDriveSource, + JiraSource, + SlackSource, TFRecordDestination, ) from .job_service import ( @@ -862,6 +867,7 @@ ) from .persistent_resource import ( PersistentResource, + RayLogsSpec, RayMetricSpec, RaySpec, ResourcePool, @@ -925,6 +931,9 @@ ExplainResponse, GenerateContentRequest, GenerateContentResponse, + GenerateVideoResponse, + PredictLongRunningMetadata, + PredictLongRunningResponse, PredictRequest, PredictResponse, RawPredictRequest, @@ -978,6 +987,7 @@ from .service_networking import ( PrivateServiceConnectConfig, PscAutomatedEndpoints, + PscInterfaceConfig, ) from .specialist_pool import ( SpecialistPool, @@ -1091,6 +1101,11 @@ TrainingPipeline, ) from .tuning_job import ( + DatasetDistribution, + DatasetStats, + 
DistillationDataStats, + DistillationHyperParameters, + DistillationSpec, SupervisedHyperParameters, SupervisedTuningDatasetDistribution, SupervisedTuningDataStats, @@ -1175,6 +1190,7 @@ "AcceleratorType", "Annotation", "AnnotationSpec", + "ApiAuth", "Artifact", "BatchPredictionJob", "CachedContent", @@ -1581,6 +1597,8 @@ "GcsDestination", "GcsSource", "GoogleDriveSource", + "JiraSource", + "SlackSource", "TFRecordDestination", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", @@ -1840,6 +1858,7 @@ "DeleteOperationMetadata", "GenericOperationMetadata", "PersistentResource", + "RayLogsSpec", "RayMetricSpec", "RaySpec", "ResourcePool", @@ -1891,6 +1910,9 @@ "ExplainResponse", "GenerateContentRequest", "GenerateContentResponse", + "GenerateVideoResponse", + "PredictLongRunningMetadata", + "PredictLongRunningResponse", "PredictRequest", "PredictResponse", "RawPredictRequest", @@ -1928,6 +1950,7 @@ "UpdateScheduleRequest", "PrivateServiceConnectConfig", "PscAutomatedEndpoints", + "PscInterfaceConfig", "SpecialistPool", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", @@ -2017,6 +2040,11 @@ "StratifiedSplit", "TimestampSplit", "TrainingPipeline", + "DatasetDistribution", + "DatasetStats", + "DistillationDataStats", + "DistillationHyperParameters", + "DistillationSpec", "SupervisedHyperParameters", "SupervisedTuningDatasetDistribution", "SupervisedTuningDataStats", diff --git a/google/cloud/aiplatform_v1beta1/types/api_auth.py b/google/cloud/aiplatform_v1beta1/types/api_auth.py new file mode 100644 index 0000000000..dc2a7ebe9a --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/api_auth.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ApiAuth", + }, +) + + +class ApiAuth(proto.Message): + r"""The generic reusable api auth config.""" + + class ApiKeyConfig(proto.Message): + r"""The API secret. + + Attributes: + api_key_secret_version (str): + Required. The SecretManager secret version + resource name storing API key. e.g. + projects/{project}/secrets/{secret}/versions/{version} + """ + + api_key_secret_version: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index a9ef252da6..7e02336449 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -254,6 +254,10 @@ class BatchPredictionJob(proto.Message): User can disable container logging by setting this flag to true. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. 
""" class InputConfig(proto.Message): @@ -720,6 +724,14 @@ class OutputInfo(proto.Message): proto.BOOL, number=34, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=36, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=37, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/cached_content.py b/google/cloud/aiplatform_v1beta1/types/cached_content.py index 9d61bf131c..c46a8a5068 100644 --- a/google/cloud/aiplatform_v1beta1/types/cached_content.py +++ b/google/cloud/aiplatform_v1beta1/types/cached_content.py @@ -60,6 +60,9 @@ class CachedContent(proto.Message): Immutable. Identifier. The server-generated resource name of the cached content Format: projects/{project}/locations/{location}/cachedContents/{cached_content} + display_name (str): + Optional. Immutable. The user-generated + meaningful display name of the cached content. model (str): Immutable. The name of the publisher model to use for cached content. Format: @@ -101,6 +104,10 @@ class CachedContent(proto.Message): proto.STRING, number=1, ) + display_name: str = proto.Field( + proto.STRING, + number=11, + ) model: str = proto.Field( proto.STRING, number=2, diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 8b52c4da35..1274ff7a0a 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -539,12 +539,35 @@ class Scheduling(proto.Message): gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job. + strategy (google.cloud.aiplatform_v1beta1.types.Scheduling.Strategy): + Optional. This determines which type of + scheduling strategy to use. disable_retries (bool): Optional. Indicates if the job should retry for internal errors after the job starts running. If true, overrides ``Scheduling.restart_job_on_worker_restart`` to false. 
""" + class Strategy(proto.Enum): + r"""Optional. This determines which type of scheduling strategy to use. + Right now users have two options such as ON_DEMAND which will use + regular on demand resources to schedule the job, the other is + LOW_COST which would leverage spot resources alongwith regular + resources to schedule the job. + + Values: + STRATEGY_UNSPECIFIED (0): + Strategy will default to ON_DEMAND. + ON_DEMAND (1): + Regular on-demand provisioning strategy. + LOW_COST (2): + Low cost by making potential use of spot + resources. + """ + STRATEGY_UNSPECIFIED = 0 + ON_DEMAND = 1 + LOW_COST = 2 + timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=1, @@ -554,6 +577,11 @@ class Scheduling(proto.Message): proto.BOOL, number=3, ) + strategy: Strategy = proto.Field( + proto.ENUM, + number=4, + enum=Strategy, + ) disable_retries: bool = proto.Field( proto.BOOL, number=5, diff --git a/google/cloud/aiplatform_v1beta1/types/data_item.py b/google/cloud/aiplatform_v1beta1/types/data_item.py index bcbbb72598..f2c63e33fa 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_item.py +++ b/google/cloud/aiplatform_v1beta1/types/data_item.py @@ -70,6 +70,10 @@ class DataItem(proto.Message): Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. 
""" name: str = proto.Field( @@ -100,6 +104,14 @@ class DataItem(proto.Message): proto.STRING, number=7, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=10, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=11, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 7d4dd11830..8ce5efe612 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -42,8 +42,8 @@ class Dataset(proto.Message): Attributes: name (str): - Output only. The resource name of the - Dataset. + Output only. Identifier. The resource name of + the Dataset. display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters @@ -115,6 +115,10 @@ class Dataset(proto.Message): Optional. Reference to the public base model last used by the dataset. Only set for prompt datasets. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. """ name: str = proto.Field( @@ -179,6 +183,14 @@ class Dataset(proto.Message): proto.STRING, number=18, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=19, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=20, + ) class ImportDataConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_version.py b/google/cloud/aiplatform_v1beta1/types/dataset_version.py index ce4ed622a2..e1e2e96c8c 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_version.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_version.py @@ -36,8 +36,8 @@ class DatasetVersion(proto.Message): Attributes: name (str): - Output only. The resource name of the - DatasetVersion. + Output only. Identifier. The resource name of + the DatasetVersion. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
Timestamp when this DatasetVersion was created. @@ -62,6 +62,10 @@ class DatasetVersion(proto.Message): Output only. Reference to the public base model last used by the dataset version. Only set for prompt dataset versions. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. """ name: str = proto.Field( @@ -99,6 +103,14 @@ class DatasetVersion(proto.Message): proto.STRING, number=9, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=10, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=11, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py b/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py index 9b33d28685..224564dd0f 100644 --- a/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py +++ b/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py @@ -76,6 +76,10 @@ class DeploymentResourcePool(proto.Message): create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DeploymentResourcePool was created. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. 
""" name: str = proto.Field( @@ -105,6 +109,14 @@ class DeploymentResourcePool(proto.Message): number=4, message=timestamp_pb2.Timestamp, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=8, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=9, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index fa9200892a..7abd76a497 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -140,6 +140,23 @@ class Endpoint(proto.Message): predict_request_response_logging_config (google.cloud.aiplatform_v1beta1.types.PredictRequestResponseLoggingConfig): Configures the request-response logging for online prediction. + dedicated_endpoint_enabled (bool): + If true, the endpoint will be exposed through a dedicated + DNS [Endpoint.dedicated_endpoint_dns]. Your request to the + dedicated DNS will be isolated from other users' traffic and + will have better performance and reliability. Note: Once you + enabled dedicated endpoint, you won't be able to send + request to the shared DNS + {region}-aiplatform.googleapis.com. The limitation will be + removed soon. + dedicated_endpoint_dns (str): + Output only. DNS of the dedicated endpoint. Will only be + populated if dedicated_endpoint_enabled is true. Format: + ``https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog``. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. 
""" name: str = proto.Field( @@ -214,6 +231,22 @@ class Endpoint(proto.Message): message="PredictRequestResponseLoggingConfig", ) ) + dedicated_endpoint_enabled: bool = proto.Field( + proto.BOOL, + number=24, + ) + dedicated_endpoint_dns: str = proto.Field( + proto.STRING, + number=25, + ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=27, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=28, + ) class DeployedModel(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py index 9d7d3c87a5..4720ba1e76 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py @@ -236,10 +236,11 @@ class UpdateFeatureOnlineStoreRequest(proto.Message): Updatable fields: - - ``big_query_source`` - - ``bigtable`` - ``labels`` - - ``sync_config`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` """ feature_online_store: gca_feature_online_store.FeatureOnlineStore = proto.Field( @@ -473,7 +474,14 @@ class UpdateFeatureViewRequest(proto.Message): Updatable fields: - ``labels`` - - ``serviceAgentType`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` """ feature_view: gca_feature_view.FeatureView = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store_service.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store_service.py index b331b05511..46430517b3 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store_service.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store_service.py @@ -369,6 +369,8 @@ class 
NearestNeighborQuery(proto.Message): be retrieved from feature view for each query. string_filters (MutableSequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborQuery.StringFilter]): Optional. The list of string filters. + numeric_filters (MutableSequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborQuery.NumericFilter]): + Optional. The list of numeric filters. per_crowding_attribute_neighbor_count (int): Optional. Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more @@ -427,6 +429,106 @@ class StringFilter(proto.Message): number=3, ) + class NumericFilter(proto.Message): + r"""Numeric filter is used to search a subset of the entities by using + boolean rules on numeric columns. For example: Database Point 0: + {name: “a” value_int: 42} {name: “b” value_float: 1.0} Database + Point 1: {name: “a” value_int: 10} {name: “b” value_float: 2.0} + Database Point 2: {name: “a” value_int: -1} {name: “b” value_float: + 3.0} Query: {name: “a” value_int: 12 operator: LESS} // Matches + Point 1, 2 {name: “b” value_float: 2.0 operator: EQUAL} // Matches + Point 1 + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value_int (int): + int value type. + + This field is a member of `oneof`_ ``Value``. + value_float (float): + float value type. + + This field is a member of `oneof`_ ``Value``. + value_double (float): + double value type. + + This field is a member of `oneof`_ ``Value``. + name (str): + Required. Column name in BigQuery that used + as filters. + op (google.cloud.aiplatform_v1beta1.types.NearestNeighborQuery.NumericFilter.Operator): + Optional. 
This MUST be specified for queries + and must NOT be specified for database points. + + This field is a member of `oneof`_ ``_op``. + """ + + class Operator(proto.Enum): + r"""Datapoints for which Operator is true relative to the query’s + Value field will be allowlisted. + + Values: + OPERATOR_UNSPECIFIED (0): + Unspecified operator. + LESS (1): + Entities are eligible if their value is < the + query's. + LESS_EQUAL (2): + Entities are eligible if their value is <= + the query's. + EQUAL (3): + Entities are eligible if their value is == + the query's. + GREATER_EQUAL (4): + Entities are eligible if their value is >= + the query's. + GREATER (5): + Entities are eligible if their value is > the + query's. + NOT_EQUAL (6): + Entities are eligible if their value is != + the query's. + """ + OPERATOR_UNSPECIFIED = 0 + LESS = 1 + LESS_EQUAL = 2 + EQUAL = 3 + GREATER_EQUAL = 4 + GREATER = 5 + NOT_EQUAL = 6 + + value_int: int = proto.Field( + proto.INT64, + number=2, + oneof="Value", + ) + value_float: float = proto.Field( + proto.FLOAT, + number=3, + oneof="Value", + ) + value_double: float = proto.Field( + proto.DOUBLE, + number=4, + oneof="Value", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + op: "NearestNeighborQuery.NumericFilter.Operator" = proto.Field( + proto.ENUM, + number=5, + optional=True, + enum="NearestNeighborQuery.NumericFilter.Operator", + ) + class Parameters(proto.Message): r"""Parameters that can be overrided in each query to tune query latency and recall. 
@@ -474,6 +576,11 @@ class Parameters(proto.Message): number=4, message=StringFilter, ) + numeric_filters: MutableSequence[NumericFilter] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=NumericFilter, + ) per_crowding_attribute_neighbor_count: int = proto.Field( proto.INT32, number=5, diff --git a/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py b/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py index ff272bc5c9..b78a40bd9f 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py @@ -219,6 +219,9 @@ class UpdateFeatureGroupRequest(proto.Message): Updatable fields: - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` """ feature_group: gca_feature_group.FeatureGroup = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index df85b675ee..3f9d504a2f 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -1536,8 +1536,10 @@ class UpdateFeatureRequest(proto.Message): - ``description`` - ``labels`` - - ``disable_monitoring`` (Not supported for FeatureRegistry - Feature) + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) """ feature: gca_feature.Feature = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index 53cc33df51..ebd4bb09c6 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -19,6 +19,9 @@ import proto # type: ignore +from google.cloud.aiplatform_v1beta1.types import api_auth +from google.protobuf import timestamp_pb2 # type: ignore + __protobuf__ = proto.module( 
package="google.cloud.aiplatform.v1beta1", @@ -34,6 +37,8 @@ "ContainerRegistryDestination", "GoogleDriveSource", "DirectUploadSource", + "SlackSource", + "JiraSource", }, ) @@ -260,4 +265,139 @@ class DirectUploadSource(proto.Message): """ +class SlackSource(proto.Message): + r"""The Slack source for the ImportRagFilesRequest. + + Attributes: + channels (MutableSequence[google.cloud.aiplatform_v1beta1.types.SlackSource.SlackChannels]): + Required. The Slack channels. + """ + + class SlackChannels(proto.Message): + r"""SlackChannels contains the Slack channels and corresponding + access token. + + Attributes: + channels (MutableSequence[google.cloud.aiplatform_v1beta1.types.SlackSource.SlackChannels.SlackChannel]): + Required. The Slack channel IDs. + api_key_config (google.cloud.aiplatform_v1beta1.types.ApiAuth.ApiKeyConfig): + Required. The SecretManager secret version + resource name (e.g. + projects/{project}/secrets/{secret}/versions/{version}) + storing the Slack channel access token that has + access to the slack channel IDs. See: + https://api.slack.com/tutorials/tracks/getting-a-token. + """ + + class SlackChannel(proto.Message): + r"""SlackChannel contains the Slack channel ID and the time range + to import. + + Attributes: + channel_id (str): + Required. The Slack channel ID. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The starting timestamp for messages + to import. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The ending timestamp for messages + to import. 
+ """ + + channel_id: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + channels: MutableSequence[ + "SlackSource.SlackChannels.SlackChannel" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="SlackSource.SlackChannels.SlackChannel", + ) + api_key_config: api_auth.ApiAuth.ApiKeyConfig = proto.Field( + proto.MESSAGE, + number=3, + message=api_auth.ApiAuth.ApiKeyConfig, + ) + + channels: MutableSequence[SlackChannels] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=SlackChannels, + ) + + +class JiraSource(proto.Message): + r"""The Jira source for the ImportRagFilesRequest. + + Attributes: + jira_queries (MutableSequence[google.cloud.aiplatform_v1beta1.types.JiraSource.JiraQueries]): + Required. The Jira queries. + """ + + class JiraQueries(proto.Message): + r"""JiraQueries contains the Jira queries and corresponding + authentication. + + Attributes: + projects (MutableSequence[str]): + A list of Jira projects to import in their + entirety. + custom_queries (MutableSequence[str]): + A list of custom Jira queries to import. For + information about JQL (Jira Query Language), see + https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ + email (str): + Required. The Jira email address. + server_uri (str): + Required. The Jira server URI. + api_key_config (google.cloud.aiplatform_v1beta1.types.ApiAuth.ApiKeyConfig): + Required. The SecretManager secret version + resource name (e.g. + projects/{project}/secrets/{secret}/versions/{version}) + storing the Jira API key + (https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). 
+ """ + + projects: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + custom_queries: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=4, + ) + email: str = proto.Field( + proto.STRING, + number=5, + ) + server_uri: str = proto.Field( + proto.STRING, + number=6, + ) + api_key_config: api_auth.ApiAuth.ApiKeyConfig = proto.Field( + proto.MESSAGE, + number=7, + message=api_auth.ApiAuth.ApiKeyConfig, + ) + + jira_queries: MutableSequence[JiraQueries] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=JiraQueries, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_garden_service.py b/google/cloud/aiplatform_v1beta1/types/model_garden_service.py index dbb7f4e53f..a40e6fb087 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_garden_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_garden_service.py @@ -66,11 +66,13 @@ class GetPublisherModelRequest(proto.Message): language_code (str): Optional. The IETF BCP-47 language code representing the language in which the publisher - model's text information should be written in - (see go/bcp47). + model's text information should be written in. view (google.cloud.aiplatform_v1beta1.types.PublisherModelView): Optional. PublisherModel view specifying which fields to read. + is_hugging_face_model (bool): + Optional. Boolean indicates whether the + requested model is a Hugging Face model. """ name: str = proto.Field( @@ -86,6 +88,10 @@ class GetPublisherModelRequest(proto.Message): number=3, enum="PublisherModelView", ) + is_hugging_face_model: bool = proto.Field( + proto.BOOL, + number=5, + ) class ListPublisherModelsRequest(proto.Message): @@ -117,9 +123,8 @@ class ListPublisherModelsRequest(proto.Message): language_code (str): Optional. The IETF BCP-47 language code representing the language in which the publisher - models' text information should be written in - (see go/bcp47). 
If not set, by default English - (en). + models' text information should be written in. + If not set, by default English (en). """ parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitor.py b/google/cloud/aiplatform_v1beta1/types/model_monitor.py index 33ced09c7d..deae08e7fe 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitor.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitor.py @@ -96,6 +96,10 @@ class ModelMonitor(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelMonitor was updated most recently. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. """ class ModelMonitoringTarget(proto.Message): @@ -198,6 +202,14 @@ class VertexModelSource(proto.Message): number=7, message=timestamp_pb2.Timestamp, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=17, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=18, + ) class ModelMonitoringSchema(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py index b5d838b1d7..92fcedf874 100644 --- a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py @@ -21,6 +21,7 @@ from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -35,6 +36,7 @@ "ResourceRuntime", "ServiceAccountSpec", "RayMetricSpec", + "RayLogsSpec", }, ) @@ -100,6 +102,9 @@ class PersistentResource(proto.Message): If this field is left unspecified, the resources aren't peered with any network. 
+ psc_interface_config (google.cloud.aiplatform_v1beta1.types.PscInterfaceConfig): + Optional. Configuration for PSC-I for + PersistentResource. encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): Optional. Customer-managed encryption key spec for a PersistentResource. If set, this @@ -205,6 +210,11 @@ class State(proto.Enum): proto.STRING, number=11, ) + psc_interface_config: service_networking.PscInterfaceConfig = proto.Field( + proto.MESSAGE, + number=17, + message=service_networking.PscInterfaceConfig, + ) encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field( proto.MESSAGE, number=12, @@ -389,6 +399,8 @@ class RaySpec(proto.Message): head node by default if this field isn't set. ray_metric_spec (google.cloud.aiplatform_v1beta1.types.RayMetricSpec): Optional. Ray metrics configurations. + ray_logs_spec (google.cloud.aiplatform_v1beta1.types.RayLogsSpec): + Optional. OSS Ray logging configurations. """ image_uri: str = proto.Field( @@ -409,6 +421,11 @@ class RaySpec(proto.Message): number=8, message="RayMetricSpec", ) + ray_logs_spec: "RayLogsSpec" = proto.Field( + proto.MESSAGE, + number=10, + message="RayLogsSpec", + ) class ResourceRuntime(proto.Message): @@ -494,4 +511,19 @@ class RayMetricSpec(proto.Message): ) +class RayLogsSpec(proto.Message): + r"""Configuration for the Ray OSS Logs. + + Attributes: + disabled (bool): + Optional. Flag to disable the export of Ray + OSS logs to Cloud Logging. + """ + + disabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index f8fe14dde8..a39e9bdd88 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -150,6 +150,10 @@ class PipelineJob(proto.Message): preflight_validations (bool): Optional. 
Whether to do component level validations before job creation. + satisfies_pzs (bool): + Output only. Reserved for future use. + satisfies_pzi (bool): + Output only. Reserved for future use. """ class RuntimeConfig(proto.Message): @@ -343,6 +347,14 @@ class InputArtifact(proto.Message): proto.BOOL, number=26, ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=27, + ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=28, + ) class PipelineTemplateMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 4f65094114..75b0687a0f 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -53,6 +53,9 @@ "GenerateContentRequest", "GenerateContentResponse", "ChatCompletionsRequest", + "PredictLongRunningResponse", + "PredictLongRunningMetadata", + "GenerateVideoResponse", }, ) @@ -730,21 +733,38 @@ class CountTokensRequest(proto.Message): r"""Request message for [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens]. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: endpoint (str): Required. The name of the Endpoint requested to perform token counting. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` model (str): - Required. The name of the publisher model requested to serve + Optional. The name of the publisher model requested to serve the prediction. Format: ``projects/{project}/locations/{location}/publishers/*/models/*`` instances (MutableSequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to + Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. 
contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): - Required. Input content. + Optional. Input content. + system_instruction (google.cloud.aiplatform_v1beta1.types.Content): + Optional. The user provided system + instructions for the model. Note: only text + should be used in parts and content in each part + will be in a separate paragraph. + + This field is a member of `oneof`_ ``_system_instruction``. + tools (MutableSequence[google.cloud.aiplatform_v1beta1.types.Tool]): + Optional. A list of ``Tools`` the model may use to generate + the next response. + + A ``Tool`` is a piece of code that enables the system to + interact with external systems to perform an action, or set + of actions, outside of knowledge and scope of the model. """ endpoint: str = proto.Field( @@ -765,6 +785,17 @@ class CountTokensRequest(proto.Message): number=4, message=content.Content, ) + system_instruction: content.Content = proto.Field( + proto.MESSAGE, + number=5, + optional=True, + message=content.Content, + ) + tools: MutableSequence[tool.Tool] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=tool.Tool, + ) class CountTokensResponse(proto.Message): @@ -1012,4 +1043,62 @@ class ChatCompletionsRequest(proto.Message): ) +class PredictLongRunningResponse(proto.Message): + r"""Response message for [PredictionService.PredictLongRunning] + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + generate_video_response (google.cloud.aiplatform_v1beta1.types.GenerateVideoResponse): + The response of the video generation + prediction. + + This field is a member of `oneof`_ ``response``. 
+ """ + + generate_video_response: "GenerateVideoResponse" = proto.Field( + proto.MESSAGE, + number=1, + oneof="response", + message="GenerateVideoResponse", + ) + + +class PredictLongRunningMetadata(proto.Message): + r"""Metadata for PredictLongRunning long running operations.""" + + +class GenerateVideoResponse(proto.Message): + r"""Generate video response. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + generated_samples (MutableSequence[str]): + The cloud storage uris of the generated + videos. + rai_media_filtered_count (int): + Returns if any videos were filtered due to + RAI policies. + + This field is a member of `oneof`_ ``_rai_media_filtered_count``. + rai_media_filtered_reasons (MutableSequence[str]): + Returns rai failure reasons if any. + """ + + generated_samples: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + rai_media_filtered_count: int = proto.Field( + proto.INT32, + number=2, + optional=True, + ) + rai_media_filtered_reasons: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/service_networking.py b/google/cloud/aiplatform_v1beta1/types/service_networking.py index c2f5154153..7d0bec8a5c 100644 --- a/google/cloud/aiplatform_v1beta1/types/service_networking.py +++ b/google/cloud/aiplatform_v1beta1/types/service_networking.py @@ -25,6 +25,7 @@ manifest={ "PrivateServiceConnectConfig", "PscAutomatedEndpoints", + "PscInterfaceConfig", }, ) @@ -80,4 +81,29 @@ class PscAutomatedEndpoints(proto.Message): ) +class PscInterfaceConfig(proto.Message): + r"""Configuration for PSC-I. + + Attributes: + network_attachment (str): + Optional. The full name of the Compute Engine `network + attachment `__ + to attach to the resource. For example, + ``projects/12345/regions/us-central1/networkAttachments/myNA``. 
+ is of the form + ``projects/{project}/regions/{region}/networkAttachments/{networkAttachment}``. + Where {project} is a project number, as in ``12345``, and + {networkAttachment} is a network attachment name. To specify + this field, you must have already [created a network + attachment] + (https://cloud.google.com/vpc/docs/create-manage-network-attachments#create-network-attachments). + This field is only used for resources using PSC-I. + """ + + network_attachment: str = proto.Field( + proto.STRING, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index 971323a5b5..6d6d41beb8 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -292,7 +292,7 @@ class Parameter(proto.Message): class TrialContext(proto.Message): - r"""Next ID: 3 + r""" Attributes: description (str): diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index 36786f3658..290f4eef1a 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -325,10 +325,8 @@ class Retrieval(proto.Message): This field is a member of `oneof`_ ``source``. disable_attribution (bool): - Optional. Disable using the result from this - tool in detecting grounding attribution. This - does not affect how the result is given to the - model for generation. + Optional. Deprecated. This option is no + longer supported. 
""" vertex_ai_search: "VertexAISearch" = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/tuning_job.py b/google/cloud/aiplatform_v1beta1/types/tuning_job.py index 743c1309f7..d09de06e6c 100644 --- a/google/cloud/aiplatform_v1beta1/types/tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/tuning_job.py @@ -33,9 +33,14 @@ "TunedModel", "SupervisedTuningDatasetDistribution", "SupervisedTuningDataStats", + "DatasetDistribution", + "DatasetStats", + "DistillationDataStats", "TuningDataStats", "SupervisedHyperParameters", "SupervisedTuningSpec", + "DistillationSpec", + "DistillationHyperParameters", }, ) @@ -43,6 +48,11 @@ class TuningJob(proto.Message): r"""Represents a TuningJob that runs with Google owned models. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: @@ -54,6 +64,10 @@ class TuningJob(proto.Message): supervised_tuning_spec (google.cloud.aiplatform_v1beta1.types.SupervisedTuningSpec): Tuning Spec for Supervised Fine Tuning. + This field is a member of `oneof`_ ``tuning_spec``. + distillation_spec (google.cloud.aiplatform_v1beta1.types.DistillationSpec): + Tuning Spec for Distillation. + This field is a member of `oneof`_ ``tuning_spec``. name (str): Output only. Identifier. Resource name of a TuningJob. @@ -112,6 +126,11 @@ class TuningJob(proto.Message): tuning_data_stats (google.cloud.aiplatform_v1beta1.types.TuningDataStats): Output only. The tuning data statistics associated with this [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + pipeline_job (str): + Output only. The resource name of the PipelineJob associated + with the [TuningJob][google.cloud.aiplatform.v1.TuningJob]. 
+ Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``. encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): Customer-managed encryption key options for a TuningJob. If this is set, then all resources @@ -130,6 +149,12 @@ class TuningJob(proto.Message): oneof="tuning_spec", message="SupervisedTuningSpec", ) + distillation_spec: "DistillationSpec" = proto.Field( + proto.MESSAGE, + number=17, + oneof="tuning_spec", + message="DistillationSpec", + ) name: str = proto.Field( proto.STRING, number=1, @@ -191,6 +216,10 @@ class TuningJob(proto.Message): number=15, message="TuningDataStats", ) + pipeline_job: str = proto.Field( + proto.STRING, + number=18, + ) encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field( proto.MESSAGE, number=16, @@ -228,6 +257,9 @@ class SupervisedTuningDatasetDistribution(proto.Message): sum (int): Output only. Sum of a given population of values. + billable_sum (int): + Output only. Sum of a given population of + values that are billable. min_ (float): Output only. The minimum of the population values. @@ -280,6 +312,10 @@ class DatasetBucket(proto.Message): proto.INT64, number=1, ) + billable_sum: int = proto.Field( + proto.INT64, + number=9, + ) min_: float = proto.Field( proto.DOUBLE, number=2, @@ -324,6 +360,9 @@ class SupervisedTuningDataStats(proto.Message): total_billable_character_count (int): Output only. Number of billable characters in the tuning dataset. + total_billable_token_count (int): + Output only. Number of billable tokens in the + tuning dataset. tuning_step_count (int): Output only. Number of tuning steps for this Tuning Job. 
@@ -353,6 +392,10 @@ class SupervisedTuningDataStats(proto.Message): proto.INT64, number=3, ) + total_billable_token_count: int = proto.Field( + proto.INT64, + number=9, + ) tuning_step_count: int = proto.Field( proto.INT64, number=4, @@ -381,10 +424,193 @@ class SupervisedTuningDataStats(proto.Message): ) +class DatasetDistribution(proto.Message): + r"""Distribution computed over a tuning dataset. + + Attributes: + sum (float): + Output only. Sum of a given population of + values. + min_ (float): + Output only. The minimum of the population + values. + max_ (float): + Output only. The maximum of the population + values. + mean (float): + Output only. The arithmetic mean of the + values in the population. + median (float): + Output only. The median of the values in the + population. + p5 (float): + Output only. The 5th percentile of the values + in the population. + p95 (float): + Output only. The 95th percentile of the + values in the population. + buckets (MutableSequence[google.cloud.aiplatform_v1beta1.types.DatasetDistribution.DistributionBucket]): + Output only. Defines the histogram bucket. + """ + + class DistributionBucket(proto.Message): + r"""Dataset bucket used to create a histogram for the + distribution given a population of values. + + Attributes: + count (int): + Output only. Number of values in the bucket. + left (float): + Output only. Left bound of the bucket. + right (float): + Output only. Right bound of the bucket. 
+ """ + + count: int = proto.Field( + proto.INT64, + number=1, + ) + left: float = proto.Field( + proto.DOUBLE, + number=2, + ) + right: float = proto.Field( + proto.DOUBLE, + number=3, + ) + + sum: float = proto.Field( + proto.DOUBLE, + number=1, + ) + min_: float = proto.Field( + proto.DOUBLE, + number=2, + ) + max_: float = proto.Field( + proto.DOUBLE, + number=3, + ) + mean: float = proto.Field( + proto.DOUBLE, + number=4, + ) + median: float = proto.Field( + proto.DOUBLE, + number=5, + ) + p5: float = proto.Field( + proto.DOUBLE, + number=6, + ) + p95: float = proto.Field( + proto.DOUBLE, + number=7, + ) + buckets: MutableSequence[DistributionBucket] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=DistributionBucket, + ) + + +class DatasetStats(proto.Message): + r"""Statistics computed over a tuning dataset. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tuning_dataset_example_count (int): + Output only. Number of examples in the tuning + dataset. + total_tuning_character_count (int): + Output only. Number of tuning characters in + the tuning dataset. + total_billable_character_count (int): + Output only. Number of billable characters in + the tuning dataset. + tuning_step_count (int): + Output only. Number of tuning steps for this + Tuning Job. + user_input_token_distribution (google.cloud.aiplatform_v1beta1.types.DatasetDistribution): + Output only. Dataset distributions for the + user input tokens. + user_output_token_distribution (google.cloud.aiplatform_v1beta1.types.DatasetDistribution): + Output only. Dataset distributions for the + user output tokens. + + This field is a member of `oneof`_ ``_user_output_token_distribution``. + user_message_per_example_distribution (google.cloud.aiplatform_v1beta1.types.DatasetDistribution): + Output only. Dataset distributions for the + messages per example. 
+ user_dataset_examples (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Output only. Sample user messages in the + training dataset uri. + """ + + tuning_dataset_example_count: int = proto.Field( + proto.INT64, + number=1, + ) + total_tuning_character_count: int = proto.Field( + proto.INT64, + number=2, + ) + total_billable_character_count: int = proto.Field( + proto.INT64, + number=3, + ) + tuning_step_count: int = proto.Field( + proto.INT64, + number=4, + ) + user_input_token_distribution: "DatasetDistribution" = proto.Field( + proto.MESSAGE, + number=5, + message="DatasetDistribution", + ) + user_output_token_distribution: "DatasetDistribution" = proto.Field( + proto.MESSAGE, + number=6, + optional=True, + message="DatasetDistribution", + ) + user_message_per_example_distribution: "DatasetDistribution" = proto.Field( + proto.MESSAGE, + number=7, + message="DatasetDistribution", + ) + user_dataset_examples: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=content.Content, + ) + + +class DistillationDataStats(proto.Message): + r"""Statistics computed for datasets used for distillation. + + Attributes: + training_dataset_stats (google.cloud.aiplatform_v1beta1.types.DatasetStats): + Output only. Statistics computed for the + training dataset. + """ + + training_dataset_stats: "DatasetStats" = proto.Field( + proto.MESSAGE, + number=1, + message="DatasetStats", + ) + + class TuningDataStats(proto.Message): r"""The tuning data statistic values for [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -392,6 +618,10 @@ class TuningDataStats(proto.Message): supervised_tuning_data_stats (google.cloud.aiplatform_v1beta1.types.SupervisedTuningDataStats): The SFT Tuning data stats. + This field is a member of `oneof`_ ``tuning_data_stats``. + distillation_data_stats (google.cloud.aiplatform_v1beta1.types.DistillationDataStats): + Output only. Statistics for distillation. + This field is a member of `oneof`_ ``tuning_data_stats``. """ @@ -401,6 +631,12 @@ class TuningDataStats(proto.Message): oneof="tuning_data_stats", message="SupervisedTuningDataStats", ) + distillation_data_stats: "DistillationDataStats" = proto.Field( + proto.MESSAGE, + number=3, + oneof="tuning_data_stats", + message="DistillationDataStats", + ) class SupervisedHyperParameters(proto.Message): @@ -485,4 +721,120 @@ class SupervisedTuningSpec(proto.Message): ) +class DistillationSpec(proto.Message): + r"""Tuning Spec for Distillation. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + base_teacher_model (str): + The base teacher model that is being + distilled, e.g., "gemini-1.0-pro-002". + + This field is a member of `oneof`_ ``teacher_model``. + tuned_teacher_model_source (str): + The resource name of the Tuned teacher model. Format: + ``projects/{project}/locations/{location}/models/{model}``. + + This field is a member of `oneof`_ ``teacher_model``. + training_dataset_uri (str): + Required. Cloud Storage path to file + containing training dataset for tuning. The + dataset must be formatted as a JSONL file. + validation_dataset_uri (str): + Optional. 
Cloud Storage path to file + containing validation dataset for tuning. The + dataset must be formatted as a JSONL file. + + This field is a member of `oneof`_ ``_validation_dataset_uri``. + hyper_parameters (google.cloud.aiplatform_v1beta1.types.DistillationHyperParameters): + Optional. Hyperparameters for Distillation. + student_model (str): + The student model that is being tuned, e.g., + "google/gemma-2b-1.1-it". + pipeline_root_directory (str): + Required. A path in a Cloud Storage bucket, + which will be treated as the root output + directory of the distillation pipeline. It is + used by the system to generate the paths of + output artifacts. + """ + + base_teacher_model: str = proto.Field( + proto.STRING, + number=5, + oneof="teacher_model", + ) + tuned_teacher_model_source: str = proto.Field( + proto.STRING, + number=6, + oneof="teacher_model", + ) + training_dataset_uri: str = proto.Field( + proto.STRING, + number=1, + ) + validation_dataset_uri: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + hyper_parameters: "DistillationHyperParameters" = proto.Field( + proto.MESSAGE, + number=3, + message="DistillationHyperParameters", + ) + student_model: str = proto.Field( + proto.STRING, + number=4, + ) + pipeline_root_directory: str = proto.Field( + proto.STRING, + number=7, + ) + + +class DistillationHyperParameters(proto.Message): + r"""Hyperparameters for Distillation. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + epoch_count (int): + Optional. Number of complete passes the model + makes over the entire training dataset during + training. + + This field is a member of `oneof`_ ``_epoch_count``. + learning_rate_multiplier (float): + Optional. Multiplier for adjusting the + default learning rate. + + This field is a member of `oneof`_ ``_learning_rate_multiplier``. 
+ adapter_size (google.cloud.aiplatform_v1beta1.types.SupervisedHyperParameters.AdapterSize): + Optional. Adapter size for distillation. + """ + + epoch_count: int = proto.Field( + proto.INT64, + number=1, + optional=True, + ) + learning_rate_multiplier: float = proto.Field( + proto.DOUBLE, + number=2, + optional=True, + ) + adapter_size: "SupervisedHyperParameters.AdapterSize" = proto.Field( + proto.ENUM, + number=3, + enum="SupervisedHyperParameters.AdapterSize", + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py index dd0df1e600..e97bf7475e 100644 --- a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py @@ -176,6 +176,14 @@ class RagFile(proto.Message): Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request. + This field is a member of `oneof`_ ``rag_file_source``. + slack_source (google.cloud.aiplatform_v1beta1.types.SlackSource): + The RagFile is imported from a Slack channel. + + This field is a member of `oneof`_ ``rag_file_source``. + jira_source (google.cloud.aiplatform_v1beta1.types.JiraSource): + The RagFile is imported from a Jira query. + This field is a member of `oneof`_ ``rag_file_source``. name (str): Output only. The resource name of the @@ -232,6 +240,18 @@ class RagFileType(proto.Enum): oneof="rag_file_source", message=io.DirectUploadSource, ) + slack_source: io.SlackSource = proto.Field( + proto.MESSAGE, + number=11, + oneof="rag_file_source", + message=io.SlackSource, + ) + jira_source: io.JiraSource = proto.Field( + proto.MESSAGE, + number=12, + oneof="rag_file_source", + message=io.JiraSource, + ) name: str = proto.Field( proto.STRING, number=1, @@ -326,6 +346,16 @@ class ImportRagFilesConfig(proto.Message): individual files as well as Google Drive folders. + This field is a member of `oneof`_ ``import_source``. 
+ slack_source (google.cloud.aiplatform_v1beta1.types.SlackSource): + Slack channels with their corresponding + access tokens. + + This field is a member of `oneof`_ ``import_source``. + jira_source (google.cloud.aiplatform_v1beta1.types.JiraSource): + Jira queries with their corresponding + authentication. + This field is a member of `oneof`_ ``import_source``. rag_file_chunking_config (google.cloud.aiplatform_v1beta1.types.RagFileChunkingConfig): Specifies the size and overlap of chunks @@ -353,6 +383,18 @@ class ImportRagFilesConfig(proto.Message): oneof="import_source", message=io.GoogleDriveSource, ) + slack_source: io.SlackSource = proto.Field( + proto.MESSAGE, + number=6, + oneof="import_source", + message=io.SlackSource, + ) + jira_source: io.JiraSource = proto.Field( + proto.MESSAGE, + number=7, + oneof="import_source", + message=io.JiraSource, + ) rag_file_chunking_config: "RagFileChunkingConfig" = proto.Field( proto.MESSAGE, number=4, diff --git a/owlbot.py b/owlbot.py index e01dd68ea8..9c43cccb31 100644 --- a/owlbot.py +++ b/owlbot.py @@ -112,6 +112,7 @@ ".kokoro/continuous/prerelease-deps.cfg", ".kokoro/presubmit/prerelease-deps.cfg", ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/release.sh", # exclude sample configs so periodic samples are tested against main # instead of pypi ".kokoro/samples/python3.7/common.cfg", diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 1d7428b21b..1f5f4512a0 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.60.0" +__version__ = "1.61.0" diff --git a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py index 12cf138787..3e592f8a9e 100644 --- a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py +++ b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py @@ -39,17 +39,8 @@ async def sample_count_tokens(): client = aiplatform_v1.LlmUtilityServiceAsyncClient() # Initialize request argument(s) - instances = aiplatform_v1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py index 96dce7c6e6..2eb51ba868 100644 --- a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py +++ b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py @@ -39,17 +39,8 @@ def sample_count_tokens(): client = aiplatform_v1.LlmUtilityServiceClient() # Initialize request argument(s) - instances = aiplatform_v1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_async.py 
b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_async.py new file mode 100644 index 0000000000..d66c3a3343 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_create_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_execution_job = aiplatform_v1.NotebookExecutionJob() + notebook_execution_job.notebook_runtime_template_resource_name = "notebook_runtime_template_resource_name_value" + notebook_execution_job.gcs_output_uri = "gcs_output_uri_value" + notebook_execution_job.execution_user = "execution_user_value" + + request = aiplatform_v1.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job=notebook_execution_job, + ) + + # Make the request + operation = client.create_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_sync.py new file mode 100644 index 0000000000..8568cea949 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_execution_job_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_create_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_execution_job = aiplatform_v1.NotebookExecutionJob() + notebook_execution_job.notebook_runtime_template_resource_name = "notebook_runtime_template_resource_name_value" + notebook_execution_job.gcs_output_uri = "gcs_output_uri_value" + notebook_execution_job.execution_user = "execution_user_value" + + request = aiplatform_v1.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job=notebook_execution_job, + ) + + # Make the request + operation = client.create_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_async.py new file mode 100644 index 0000000000..0f6fa9a9b9 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_delete_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_sync.py new file mode 100644 index 0000000000..01bad030b1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_delete_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_execution_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_async.py new file mode 100644 index 0000000000..549db5d6db --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_execution_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_sync.py new file mode 100644 index 0000000000..87d65ee67a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_execution_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookExecutionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_notebook_execution_job(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookExecutionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_execution_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_async.py new file mode 100644 index 0000000000..78d9b4fc24 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookExecutionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_notebook_execution_jobs(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookExecutionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_execution_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_sync.py new file mode 100644 index 0000000000..4a60eb3a87 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookExecutionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_notebook_execution_jobs(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookExecutionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_execution_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py index f76250482d..fc1ad9ef31 100644 --- a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py @@ -39,17 +39,8 @@ async def sample_count_tokens(): client = aiplatform_v1beta1.PredictionServiceAsyncClient() # Initialize request argument(s) - instances = aiplatform_v1beta1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1beta1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - 
instances=instances, - contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py index e3fda5c326..ecfab9b8cd 100644 --- a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py @@ -39,17 +39,8 @@ def sample_count_tokens(): client = aiplatform_v1beta1.PredictionServiceClient() # Initialize request argument(s) - instances = aiplatform_v1beta1.Value() - instances.null_value = "NULL_VALUE" - - contents = aiplatform_v1beta1.Content() - contents.parts.text = "text_value" - request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", - model="model_value", - instances=instances, - contents=contents, ) # Make the request diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 395eb394d0..1e023bf576 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.60.0" + "version": "1.61.0" }, "snippets": [ { @@ -22553,12 +22553,12 @@ "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_async", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -22568,18 +22568,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], @@ -22637,12 +22637,12 @@ "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_sync", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -22652,18 +22652,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], @@ -31909,6 +31909,183 @@ ], "title": "aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.create_notebook_execution_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.CreateNotebookExecutionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "CreateNotebookExecutionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateNotebookExecutionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "notebook_execution_job", + "type": "google.cloud.aiplatform_v1.types.NotebookExecutionJob" + }, + { + "name": "notebook_execution_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_notebook_execution_job" + }, + "description": "Sample for CreateNotebookExecutionJob", + "file": 
"aiplatform_v1_generated_notebook_service_create_notebook_execution_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_async", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_create_notebook_execution_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.create_notebook_execution_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.CreateNotebookExecutionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "CreateNotebookExecutionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateNotebookExecutionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "notebook_execution_job", + "type": "google.cloud.aiplatform_v1.types.NotebookExecutionJob" + }, + { + "name": "notebook_execution_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_notebook_execution_job" + }, + "description": "Sample for CreateNotebookExecutionJob", + "file": 
"aiplatform_v1_generated_notebook_service_create_notebook_execution_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_CreateNotebookExecutionJob_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_create_notebook_execution_job_sync.py" + }, { "canonical": true, "clientMethod": { @@ -32094,19 +32271,19 @@ "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.delete_notebook_runtime_template", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.delete_notebook_execution_job", "method": { - "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate", + "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookExecutionJob", "service": { "fullName": "google.cloud.aiplatform.v1.NotebookService", "shortName": "NotebookService" }, - "shortName": "DeleteNotebookRuntimeTemplate" + "shortName": "DeleteNotebookExecutionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookExecutionJobRequest" }, { "name": "name", @@ -32126,13 +32303,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_notebook_runtime_template" + "shortName": "delete_notebook_execution_job" }, - "description": "Sample for DeleteNotebookRuntimeTemplate", - "file": 
"aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py", + "description": "Sample for DeleteNotebookExecutionJob", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_async", "segments": [ { "end": 55, @@ -32165,7 +32342,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py" + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_async.py" }, { "canonical": true, @@ -32174,7 +32351,168 @@ "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.delete_notebook_runtime_template", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.delete_notebook_execution_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookExecutionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "DeleteNotebookExecutionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookExecutionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_notebook_execution_job" + }, + "description": "Sample for DeleteNotebookExecutionJob", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_sync.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookExecutionJob_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_execution_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.delete_notebook_runtime_template", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "DeleteNotebookRuntimeTemplate" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_notebook_runtime_template" + }, + "description": "Sample for DeleteNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async", + "segments": [ + { + 
"end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.delete_notebook_runtime_template", "method": { "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate", "service": { @@ -32408,6 +32746,167 @@ ], "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.get_notebook_execution_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookExecutionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "GetNotebookExecutionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetNotebookExecutionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.NotebookExecutionJob", + "shortName": 
"get_notebook_execution_job" + }, + "description": "Sample for GetNotebookExecutionJob", + "file": "aiplatform_v1_generated_notebook_service_get_notebook_execution_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_get_notebook_execution_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.get_notebook_execution_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookExecutionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "GetNotebookExecutionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetNotebookExecutionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.NotebookExecutionJob", + "shortName": "get_notebook_execution_job" + }, + "description": "Sample for GetNotebookExecutionJob", + "file": "aiplatform_v1_generated_notebook_service_get_notebook_execution_job_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookExecutionJob_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_get_notebook_execution_job_sync.py" + }, { "canonical": true, "clientMethod": { @@ -32730,6 +33229,167 @@ ], "title": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.list_notebook_execution_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.ListNotebookExecutionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "ListNotebookExecutionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookExecutionJobsAsyncPager", + "shortName": "list_notebook_execution_jobs" + }, + "description": "Sample for ListNotebookExecutionJobs", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.list_notebook_execution_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.NotebookService.ListNotebookExecutionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "ListNotebookExecutionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListNotebookExecutionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookExecutionJobsPager", + "shortName": "list_notebook_execution_jobs" + }, + "description": "Sample for ListNotebookExecutionJobs", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_NotebookService_ListNotebookExecutionJobs_sync", + "segments": [ + 
{ + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_notebook_service_list_notebook_execution_jobs_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 8b7b405bf8..506e06f4c3 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.60.0" + "version": "1.61.0" }, "snippets": [ { @@ -41442,12 +41442,12 @@ "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_async", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -41457,18 +41457,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], @@ -41526,12 +41526,12 @@ "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_sync", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -41541,18 +41541,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - 
"start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py index 1bc50c9b8d..08fbaf3ee0 100644 --- a/samples/model-builder/conftest.py +++ b/samples/model-builder/conftest.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock +from unittest.mock import patch from google.cloud import aiplatform +import vertexai from vertexai.resources import preview as preview_resources import pytest @@ -25,6 +27,12 @@ def mock_sdk_init(): yield mock +@pytest.fixture +def mock_vertexai_init(): + with patch.object(vertexai, "init") as mock: + yield mock + + """ ---------------------------------------------------------------------------- Dataset Fixtures diff --git a/samples/model-builder/init_sample.py b/samples/model-builder/init_sample.py index 40e74881a3..d27c9e6e39 100644 --- a/samples/model-builder/init_sample.py +++ b/samples/model-builder/init_sample.py @@ -28,9 +28,9 @@ def init_sample( service_account: Optional[str] = None, ): - from google.cloud import aiplatform + import vertexai - aiplatform.init( + vertexai.init( project=project, location=location, experiment=experiment, diff --git a/samples/model-builder/init_sample_test.py b/samples/model-builder/init_sample_test.py index 2ac5284e09..d5e7b2e9de 100644 --- a/samples/model-builder/init_sample_test.py +++ b/samples/model-builder/init_sample_test.py @@ -17,7 +17,7 @@ import test_constants as constants -def test_init_sample(mock_sdk_init): +def test_init_sample(mock_vertexai_init): init_sample.init_sample( project=constants.PROJECT, @@ -29,7 +29,7 @@ def test_init_sample(mock_sdk_init): service_account=constants.SERVICE_ACCOUNT, ) - mock_sdk_init.assert_called_once_with( + 
mock_vertexai_init.assert_called_once_with( project=constants.PROJECT, location=constants.LOCATION_EUROPE, experiment=constants.EXPERIMENT_NAME, diff --git a/samples/snippets/prediction_service/predict_custom_trained_model_sample.py b/samples/snippets/prediction_service/predict_custom_trained_model_sample.py index cf7a6d2102..d25cb0b1a7 100644 --- a/samples/snippets/prediction_service/predict_custom_trained_model_sample.py +++ b/samples/snippets/prediction_service/predict_custom_trained_model_sample.py @@ -54,7 +54,7 @@ def predict_custom_trained_model_sample( # The predictions are a google.protobuf.Value representation of the model's predictions. predictions = response.predictions for prediction in predictions: - print(" prediction:", dict(prediction)) + print(" prediction:", prediction) # [END aiplatform_predict_custom_trained_model_sample] diff --git a/setup.py b/setup.py index e3c06b004f..d870daa42c 100644 --- a/setup.py +++ b/setup.py @@ -248,7 +248,7 @@ ), "google-auth >= 2.14.1, <3.0.0dev", "proto-plus >= 1.22.3, <2.0.0dev", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", "packaging >= 14.3", "google-cloud-storage >= 1.32.0, < 3.0.0dev", "google-cloud-bigquery >= 1.15.0, < 4.0.0dev, !=3.20.0", diff --git a/tests/unit/aiplatform/test_initializer.py b/tests/unit/aiplatform/test_initializer.py index 455ee0791a..31c494c9f5 100644 --- a/tests/unit/aiplatform/test_initializer.py +++ b/tests/unit/aiplatform/test_initializer.py @@ -435,6 +435,25 @@ def test_get_client_options_with_api_override(self): assert client_options.api_endpoint == "asia-east1-override.googleapis.com" + def test_get_resource_type(self): + initializer.global_config.init() + os.environ["VERTEX_PRODUCT"] = "COLAB_ENTERPRISE" + assert initializer.global_config.get_resource_type().value == ( + "COLAB_ENTERPRISE" + ) + + initializer.global_config.init() + 
os.environ["VERTEX_PRODUCT"] = "WORKBENCH_INSTANCE" + assert initializer.global_config.get_resource_type().value == ( + "WORKBENCH_INSTANCE" + ) + + initializer.global_config.init() + os.environ["VERTEX_PRODUCT"] = "WORKBENCH_CUSTOM_CONTAINER" + assert initializer.global_config.get_resource_type().value == ( + "WORKBENCH_CUSTOM_CONTAINER" + ) + def test_init_with_only_creds_does_not_override_set_project(self): assert initializer.global_config.project is not _TEST_PROJECT_2 initializer.global_config.init(project=_TEST_PROJECT_2) diff --git a/tests/unit/aiplatform/test_vision_models.py b/tests/unit/aiplatform/test_vision_models.py index 4ddb128f04..0323d0f5bb 100644 --- a/tests/unit/aiplatform/test_vision_models.py +++ b/tests/unit/aiplatform/test_vision_models.py @@ -203,7 +203,7 @@ def teardown_method(self): def _get_image_generation_model( self, - ) -> preview_vision_models.ImageGenerationModel: + ) -> ga_vision_models.ImageGenerationModel: """Gets the image generation model.""" aiplatform.init( project=_TEST_PROJECT, @@ -216,7 +216,7 @@ def _get_image_generation_model( _IMAGE_GENERATION_PUBLISHER_MODEL_DICT ), ) as mock_get_publisher_model: - model = preview_vision_models.ImageGenerationModel.from_pretrained( + model = ga_vision_models.ImageGenerationModel.from_pretrained( "imagegeneration@002" ) diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index 48e6112747..8cf5b53e00 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -1308,12 +1308,7 @@ async def test_create_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_dataset ] = 
mock_object @@ -1709,12 +1704,7 @@ async def test_get_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_dataset ] = mock_object @@ -2105,12 +2095,7 @@ async def test_update_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_dataset ] = mock_object @@ -2500,12 +2485,7 @@ async def test_list_datasets_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_datasets ] = mock_object @@ -3058,12 +3038,7 @@ async def test_delete_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_dataset ] = mock_object @@ -3427,12 +3402,7 @@ async def test_import_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_data ] = 
mock_object @@ -3818,12 +3788,7 @@ async def test_export_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_data ] = mock_object @@ -4234,12 +4199,7 @@ async def test_create_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_dataset_version ] = mock_object @@ -4646,12 +4606,7 @@ async def test_update_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_dataset_version ] = mock_object @@ -5056,12 +5011,7 @@ async def test_delete_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_dataset_version ] = mock_object @@ -5461,12 +5411,7 @@ async def test_get_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.get_dataset_version ] = mock_object @@ -5868,12 +5813,7 @@ async def test_list_dataset_versions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_dataset_versions ] = mock_object @@ -6458,12 +6398,7 @@ async def test_restore_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.restore_dataset_version ] = mock_object @@ -6845,12 +6780,7 @@ async def test_list_data_items_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_data_items ] = mock_object @@ -7428,12 +7358,7 @@ async def test_search_data_items_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_data_items ] = mock_object @@ -7937,12 +7862,7 @@ async def test_list_saved_queries_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - 
mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_saved_queries ] = mock_object @@ -8526,12 +8446,7 @@ async def test_delete_saved_query_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_saved_query ] = mock_object @@ -8925,12 +8840,7 @@ async def test_get_annotation_spec_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_annotation_spec ] = mock_object @@ -9317,12 +9227,7 @@ async def test_list_annotations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_annotations ] = mock_object @@ -9957,7 +9862,7 @@ def test_create_dataset_rest_required_fields( response = client.create_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10285,7 +10190,7 @@ def test_get_dataset_rest_required_fields( response = client.get_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10702,7 +10607,7 @@ def 
test_update_dataset_rest_required_fields( response = client.update_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11027,7 +10932,7 @@ def test_list_datasets_rest_required_fields( response = client.list_datasets(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11393,7 +11298,7 @@ def test_delete_dataset_rest_required_fields( response = client.delete_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11692,7 +11597,7 @@ def test_import_data_rest_required_fields( response = client.import_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12006,7 +11911,7 @@ def test_export_data_rest_required_fields( response = client.export_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12415,7 +12320,7 @@ def test_create_dataset_version_rest_required_fields( response = client.create_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12831,7 +12736,7 @@ def test_update_dataset_version_rest_required_fields( response = client.update_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13155,7 +13060,7 @@ def 
test_delete_dataset_version_rest_required_fields( response = client.delete_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13478,7 +13383,7 @@ def test_get_dataset_version_rest_required_fields( response = client.get_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13800,7 +13705,7 @@ def test_list_dataset_versions_rest_required_fields( response = client.list_dataset_versions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14178,7 +14083,7 @@ def test_restore_dataset_version_rest_required_fields( response = client.restore_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14495,7 +14400,7 @@ def test_list_data_items_rest_required_fields( response = client.list_data_items(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14889,7 +14794,7 @@ def test_search_data_items_rest_required_fields( response = client.search_data_items(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15226,7 +15131,7 @@ def test_list_saved_queries_rest_required_fields( response = client.list_saved_queries(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params 
@@ -15603,7 +15508,7 @@ def test_delete_saved_query_rest_required_fields( response = client.delete_saved_query(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15922,7 +15827,7 @@ def test_get_annotation_spec_rest_required_fields( response = client.get_annotation_spec(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16243,7 +16148,7 @@ def test_list_annotations_rest_required_fields( response = client.list_annotations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py index 689262dd56..1398b39b8a 100644 --- a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py @@ -1426,12 +1426,7 @@ async def test_create_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_deployment_resource_pool ] = mock_object @@ -1866,12 +1861,7 @@ async def test_get_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.get_deployment_resource_pool ] = mock_object @@ -2275,12 +2265,7 @@ async def test_list_deployment_resource_pools_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_deployment_resource_pools ] = mock_object @@ -2882,12 +2867,7 @@ async def test_update_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_deployment_resource_pool ] = mock_object @@ -3305,12 +3285,7 @@ async def test_delete_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_deployment_resource_pool ] = mock_object @@ -3709,12 +3684,7 @@ async def test_query_deployed_models_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_deployed_models ] = mock_object @@ -4289,7 +4259,7 @@ def test_create_deployment_resource_pool_rest_required_fields( response = client.create_deployment_resource_pool(request) - expected_params = [] 
+ expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4629,7 +4599,7 @@ def test_get_deployment_resource_pool_rest_required_fields( response = client.get_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4967,7 +4937,7 @@ def test_list_deployment_resource_pools_rest_required_fields( response = client.list_deployment_resource_pools(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5452,7 +5422,7 @@ def test_update_deployment_resource_pool_rest_required_fields( response = client.update_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5787,7 +5757,7 @@ def test_delete_deployment_resource_pool_rest_required_fields( response = client.delete_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6127,7 +6097,7 @@ def test_query_deployed_models_rest_required_fields( response = client.query_deployed_models(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index dc12d56124..07e3218cd7 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -1333,12 +1333,7 @@ async def 
test_create_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_endpoint ] = mock_object @@ -1587,6 +1582,10 @@ def test_get_endpoint(request_type, transport: str = "grpc"): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_endpoint(request) @@ -1608,6 +1607,10 @@ def test_get_endpoint(request_type, transport: str = "grpc"): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_endpoint_empty_call(): @@ -1713,6 +1716,10 @@ async def test_get_endpoint_empty_call_async(): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_endpoint() @@ -1744,12 +1751,7 @@ async def test_get_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_endpoint ] = mock_object @@ -1792,6 +1794,10 @@ async def 
test_get_endpoint_async( network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_endpoint(request) @@ -1814,6 +1820,10 @@ async def test_get_endpoint_async( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2130,12 +2140,7 @@ async def test_list_endpoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_endpoints ] = mock_object @@ -2553,6 +2558,10 @@ def test_update_endpoint(request_type, transport: str = "grpc"): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_endpoint(request) @@ -2574,6 +2583,10 @@ def test_update_endpoint(request_type, transport: str = "grpc"): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_endpoint_empty_call(): @@ -2675,6 +2688,10 @@ async def 
test_update_endpoint_empty_call_async(): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_endpoint() @@ -2706,12 +2723,7 @@ async def test_update_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_endpoint ] = mock_object @@ -2754,6 +2766,10 @@ async def test_update_endpoint_async( network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_endpoint(request) @@ -2776,6 +2792,10 @@ async def test_update_endpoint_async( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -3099,12 +3119,7 @@ async def test_delete_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_endpoint ] = mock_object @@ -3468,12 +3483,7 @@ async def 
test_deploy_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.deploy_model ] = mock_object @@ -3895,12 +3905,7 @@ async def test_undeploy_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.undeploy_model ] = mock_object @@ -4297,12 +4302,7 @@ async def test_mutate_deployed_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_deployed_model ] = mock_object @@ -4694,6 +4694,10 @@ def test_create_endpoint_rest(request_type): "sampling_rate": 0.13820000000000002, "bigquery_destination": {"output_uri": "output_uri_value"}, }, + "dedicated_endpoint_enabled": True, + "dedicated_endpoint_dns": "dedicated_endpoint_dns_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -4891,7 +4895,7 @@ def test_create_endpoint_rest_required_fields( response = client.create_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5088,6 +5092,10 @@ def test_get_endpoint_rest(request_type): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -5113,6 +5121,10 @@ def test_get_endpoint_rest(request_type): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_endpoint_rest_use_cached_wrapped_rpc(): @@ -5220,7 +5232,7 @@ def test_get_endpoint_rest_required_fields( response = client.get_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5532,7 +5544,7 @@ def test_list_endpoints_rest_required_fields( response = client.list_endpoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5883,6 +5895,10 @@ def test_update_endpoint_rest(request_type): "sampling_rate": 0.13820000000000002, "bigquery_destination": {"output_uri": "output_uri_value"}, }, + "dedicated_endpoint_enabled": True, + "dedicated_endpoint_dns": "dedicated_endpoint_dns_value", + "satisfies_pzs": True, + 
"satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -5964,6 +5980,10 @@ def get_message_fields(field): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -5989,6 +6009,10 @@ def get_message_fields(field): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_endpoint_rest_use_cached_wrapped_rpc(): @@ -6094,7 +6118,7 @@ def test_update_endpoint_rest_required_fields( response = client.update_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6407,7 +6431,7 @@ def test_delete_endpoint_rest_required_fields( response = client.delete_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6708,7 +6732,7 @@ def test_deploy_model_rest_required_fields( response = client.deploy_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7038,7 +7062,7 @@ def test_undeploy_model_rest_required_fields( response = client.undeploy_model(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7357,7 +7381,7 @@ def test_mutate_deployed_model_rest_required_fields( response = client.mutate_deployed_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py index 6789bfff05..bd4d3b52fa 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py @@ -1434,12 +1434,7 @@ async def test_create_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_online_store ] = mock_object @@ -1896,12 +1891,7 @@ async def test_get_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_online_store ] = mock_object @@ -2309,12 +2299,7 @@ async def test_list_feature_online_stores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.list_feature_online_stores ] = mock_object @@ -2914,12 +2899,7 @@ async def test_update_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_online_store ] = mock_object @@ -3361,12 +3341,7 @@ async def test_delete_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_online_store ] = mock_object @@ -3765,12 +3740,7 @@ async def test_create_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_view ] = mock_object @@ -4195,12 +4165,7 @@ async def test_get_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_view ] = mock_object @@ -4585,12 +4550,7 @@ async def test_list_feature_views_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_views ] = mock_object @@ -5176,12 +5136,7 @@ async def test_update_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_view ] = mock_object @@ -5602,12 +5557,7 @@ async def test_delete_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_view ] = mock_object @@ -5995,12 +5945,7 @@ async def test_sync_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.sync_feature_view ] = mock_object @@ -6390,12 +6335,7 @@ async def test_get_feature_view_sync_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_view_sync ] = mock_object @@ -6797,12 +6737,7 @@ async def test_list_feature_view_syncs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - 
self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_view_syncs ] = mock_object @@ -7482,6 +7417,7 @@ def test_create_feature_online_store_rest_required_fields( "featureOnlineStoreId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7826,7 +7762,7 @@ def test_get_feature_online_store_rest_required_fields( response = client.get_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8160,7 +8096,7 @@ def test_list_feature_online_stores_rest_required_fields( response = client.list_feature_online_stores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8654,7 +8590,7 @@ def test_update_feature_online_store_rest_required_fields( response = client.update_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8989,7 +8925,7 @@ def test_delete_feature_online_store_rest_required_fields( response = client.delete_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9429,6 +9365,7 @@ def test_create_feature_view_rest_required_fields( "featureViewId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9771,7 +9708,7 @@ def test_get_feature_view_rest_required_fields( response = client.get_feature_view(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10101,7 +10038,7 @@ def test_list_feature_views_rest_required_fields( response = client.list_feature_views(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10593,7 +10530,7 @@ def test_update_feature_view_rest_required_fields( response = client.update_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10919,7 +10856,7 @@ def test_delete_feature_view_rest_required_fields( response = client.delete_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11242,7 +11179,7 @@ def test_sync_feature_view_rest_required_fields( response = client.sync_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11563,7 +11500,7 @@ def test_get_feature_view_sync_rest_required_fields( response = client.get_feature_view_sync(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11897,7 +11834,7 @@ def test_list_feature_view_syncs_rest_required_fields( response = client.list_feature_view_syncs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py 
b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py index 39dff30efe..6f1fc60387 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py @@ -1383,12 +1383,7 @@ async def test_fetch_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.fetch_feature_values ] = mock_object @@ -1780,12 +1775,7 @@ async def test_search_nearest_entities_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_nearest_entities ] = mock_object @@ -2062,7 +2052,7 @@ def test_fetch_feature_values_rest_required_fields( response = client.fetch_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2389,7 +2379,7 @@ def test_search_nearest_entities_rest_required_fields( response = client.search_nearest_entities(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py index f72da8b815..dd961c5f03 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py @@ 
-1392,12 +1392,7 @@ async def test_create_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_group ] = mock_object @@ -1833,12 +1828,7 @@ async def test_get_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_group ] = mock_object @@ -2235,12 +2225,7 @@ async def test_list_feature_groups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_groups ] = mock_object @@ -2820,12 +2805,7 @@ async def test_update_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_group ] = mock_object @@ -3246,12 +3226,7 @@ async def test_delete_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.delete_feature_group ] = mock_object @@ -3638,12 +3613,7 @@ async def test_create_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature ] = mock_object @@ -4047,12 +4017,7 @@ async def test_get_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature ] = mock_object @@ -4430,12 +4395,7 @@ async def test_list_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_features ] = mock_object @@ -4985,12 +4945,7 @@ async def test_update_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature ] = mock_object @@ -5365,12 +5320,7 @@ async def test_delete_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.delete_feature ] = mock_object @@ -5816,6 +5766,7 @@ def test_create_feature_group_rest_required_fields( "featureGroupId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6147,7 +6098,7 @@ def test_get_feature_group_rest_required_fields( response = client.get_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6470,7 +6421,7 @@ def test_list_feature_groups_rest_required_fields( response = client.list_feature_groups(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6932,7 +6883,7 @@ def test_update_feature_group_rest_required_fields( response = client.update_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7256,7 +7207,7 @@ def test_delete_feature_group_rest_required_fields( response = client.delete_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7670,6 +7621,7 @@ def test_create_feature_rest_required_fields( "featureId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8004,7 +7956,7 @@ def test_get_feature_rest_required_fields( response = client.get_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8322,7 +8274,7 @@ def 
test_list_features_rest_required_fields( response = client.list_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8791,7 +8743,7 @@ def test_update_feature_rest_required_fields( response = client.update_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9102,7 +9054,7 @@ def test_delete_feature_rest_required_fields( response = client.delete_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py index 3b5e77c953..b63ba8867e 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py @@ -1415,12 +1415,7 @@ async def test_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_feature_values ] = mock_object @@ -1810,12 +1805,7 @@ async def test_streaming_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_read_feature_values ] = 
mock_object @@ -2201,12 +2191,7 @@ async def test_write_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.write_feature_values ] = mock_object @@ -2605,7 +2590,7 @@ def test_read_feature_values_rest_required_fields( response = client.read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2951,7 +2936,7 @@ def test_streaming_read_feature_values_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.streaming_read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3288,7 +3273,7 @@ def test_write_feature_values_rest_required_fields( response = client.write_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py index b880e000ce..f03a0772de 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -1393,12 +1393,7 @@ async def test_create_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.create_featurestore ] = mock_object @@ -1805,12 +1800,7 @@ async def test_get_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_featurestore ] = mock_object @@ -2199,12 +2189,7 @@ async def test_list_featurestores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_featurestores ] = mock_object @@ -2784,12 +2769,7 @@ async def test_update_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_featurestore ] = mock_object @@ -3186,12 +3166,7 @@ async def test_delete_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_featurestore ] = mock_object @@ -3590,12 +3565,7 @@ async def test_create_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_entity_type ] = mock_object @@ -4000,12 +3970,7 @@ async def test_get_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_entity_type ] = mock_object @@ -4392,12 +4357,7 @@ async def test_list_entity_types_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_entity_types ] = mock_object @@ -4987,12 +4947,7 @@ async def test_update_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_entity_type ] = mock_object @@ -5394,12 +5349,7 @@ async def test_delete_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_entity_type ] = mock_object @@ -5786,12 +5736,7 @@ async def test_create_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature ] = mock_object @@ -6189,12 +6134,7 @@ async def test_batch_create_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_features ] = mock_object @@ -6598,12 +6538,7 @@ async def test_get_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature ] = mock_object @@ -6981,12 +6916,7 @@ async def test_list_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_features ] = mock_object @@ -7555,12 +7485,7 @@ async def test_update_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature ] = mock_object @@ -7942,12 +7867,7 @@ async def test_delete_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - 
self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature ] = mock_object @@ -8329,12 +8249,7 @@ async def test_import_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_feature_values ] = mock_object @@ -8722,12 +8637,7 @@ async def test_batch_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_read_feature_values ] = mock_object @@ -9115,12 +9025,7 @@ async def test_export_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_feature_values ] = mock_object @@ -9508,12 +9413,7 @@ async def test_delete_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_values ] = mock_object @@ -9893,12 +9793,7 @@ async def test_search_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function 
with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_features ] = mock_object @@ -10546,6 +10441,7 @@ def test_create_featurestore_rest_required_fields( "featurestoreId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10870,7 +10766,7 @@ def test_get_featurestore_rest_required_fields( response = client.get_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11191,7 +11087,7 @@ def test_list_featurestores_rest_required_fields( response = client.list_featurestores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11653,7 +11549,7 @@ def test_update_featurestore_rest_required_fields( response = client.update_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11968,7 +11864,7 @@ def test_delete_featurestore_rest_required_fields( response = client.delete_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12378,6 +12274,7 @@ def test_create_entity_type_rest_required_fields( "entityTypeId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12705,7 +12602,7 @@ def test_get_entity_type_rest_required_fields( response = client.get_entity_type(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13026,7 +12923,7 @@ def test_list_entity_types_rest_required_fields( response = client.list_entity_types(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13503,7 +13400,7 @@ def test_update_entity_type_rest_required_fields( response = client.update_entity_type(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13820,7 +13717,7 @@ def test_delete_entity_type_rest_required_fields( response = client.delete_entity_type(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14235,6 +14132,7 @@ def test_create_feature_rest_required_fields( "featureId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14559,7 +14457,7 @@ def test_batch_create_features_rest_required_fields( response = client.batch_create_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14891,7 +14789,7 @@ def test_get_feature_rest_required_fields( response = client.get_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15209,7 +15107,7 @@ def test_list_features_rest_required_fields( response = client.list_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] 
actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15694,7 +15592,7 @@ def test_update_feature_rest_required_fields( response = client.update_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16003,7 +15901,7 @@ def test_delete_feature_rest_required_fields( response = client.delete_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16314,7 +16212,7 @@ def test_import_feature_values_rest_required_fields( response = client.import_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16634,7 +16532,7 @@ def test_batch_read_feature_values_rest_required_fields( response = client.batch_read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16955,7 +16853,7 @@ def test_export_feature_values_rest_required_fields( response = client.export_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17276,7 +17174,7 @@ def test_delete_feature_values_rest_required_fields( response = client.delete_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17592,7 +17490,7 @@ def test_search_features_rest_required_fields( response = client.search_features(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py index 2a695641c3..e0e20bf3e6 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py @@ -1368,12 +1368,7 @@ async def test_create_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tuning_job ] = mock_object @@ -1779,12 +1774,7 @@ async def test_get_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tuning_job ] = mock_object @@ -2163,12 +2153,7 @@ async def test_list_tuning_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tuning_jobs ] = mock_object @@ -2726,12 +2711,7 @@ async def test_cancel_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.cancel_tuning_job ] = mock_object @@ -2985,9 +2965,11 @@ def test_create_tuning_job_rest(request_type): "tuning_dataset_example_count": 2989, "total_tuning_character_count": 2988, "total_billable_character_count": 3150, + "total_billable_token_count": 2754, "tuning_step_count": 1848, "user_input_token_distribution": { "sum": 341, + "billable_sum": 1259, "min_": 0.419, "max_": 0.421, "mean": 0.417, @@ -3241,7 +3223,7 @@ def test_create_tuning_job_rest_required_fields( response = client.create_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3562,7 +3544,7 @@ def test_get_tuning_job_rest_required_fields( response = client.get_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3874,7 +3856,7 @@ def test_list_tuning_jobs_rest_required_fields( response = client.list_tuning_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4237,7 +4219,7 @@ def test_cancel_tuning_job_rest_required_fields( response = client.cancel_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 78ded036cf..80ad6f24aa 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -1386,12 +1386,7 @@ async def test_create_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - 
class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_index_endpoint ] = mock_object @@ -1810,12 +1805,7 @@ async def test_get_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_index_endpoint ] = mock_object @@ -2220,12 +2210,7 @@ async def test_list_index_endpoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_index_endpoints ] = mock_object @@ -2828,12 +2813,7 @@ async def test_update_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_index_endpoint ] = mock_object @@ -3244,12 +3224,7 @@ async def test_delete_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_index_endpoint ] = mock_object @@ -3624,12 +3599,7 @@ async def 
test_deploy_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.deploy_index ] = mock_object @@ -4006,12 +3976,7 @@ async def test_undeploy_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.undeploy_index ] = mock_object @@ -4399,12 +4364,7 @@ async def test_mutate_deployed_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_deployed_index ] = mock_object @@ -4915,7 +4875,7 @@ def test_create_index_endpoint_rest_required_fields( response = client.create_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5247,7 +5207,7 @@ def test_get_index_endpoint_rest_required_fields( response = client.get_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5569,7 +5529,7 @@ def test_list_index_endpoints_rest_required_fields( response = client.list_index_endpoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6108,7 +6068,7 @@ def test_update_index_endpoint_rest_required_fields( response = client.update_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6431,7 +6391,7 @@ def test_delete_index_endpoint_rest_required_fields( response = client.delete_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6736,7 +6696,7 @@ def test_deploy_index_rest_required_fields( response = client.deploy_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7056,7 +7016,7 @@ def test_undeploy_index_rest_required_fields( response = client.undeploy_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7490,7 +7450,7 @@ def test_mutate_deployed_index_rest_required_fields( response = client.mutate_deployed_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py index 990c487298..595fb98c65 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py @@ -1277,12 +1277,7 @@ async def test_create_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) 
- - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_index ] = mock_object @@ -1670,12 +1665,7 @@ async def test_get_index_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_index ] = mock_object @@ -2049,12 +2039,7 @@ async def test_list_indexes_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_indexes ] = mock_object @@ -2603,12 +2588,7 @@ async def test_update_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_index ] = mock_object @@ -2982,12 +2962,7 @@ async def test_delete_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_index ] = mock_object @@ -3357,12 +3332,7 @@ async def test_upsert_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upsert_datapoints ] = mock_object @@ -3652,12 +3622,7 @@ async def test_remove_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.remove_datapoints ] = mock_object @@ -4019,7 +3984,7 @@ def test_create_index_rest_required_fields( response = client.create_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4339,7 +4304,7 @@ def test_get_index_rest_required_fields(request_type=index_service.GetIndexReque response = client.get_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4646,7 +4611,7 @@ def test_list_indexes_rest_required_fields( response = client.list_indexes(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5108,7 +5073,7 @@ def test_update_index_rest_required_fields( response = client.update_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5413,7 +5378,7 @@ def test_delete_index_rest_required_fields( response = client.delete_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ 
-5715,7 +5680,7 @@ def test_upsert_datapoints_rest_required_fields( response = client.upsert_datapoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5961,7 +5926,7 @@ def test_remove_datapoints_rest_required_fields( response = client.remove_datapoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 7a1aad040d..a04938cb6c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -1312,12 +1312,7 @@ async def test_create_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_custom_job ] = mock_object @@ -1711,12 +1706,7 @@ async def test_get_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_custom_job ] = mock_object @@ -2090,12 +2080,7 @@ async def test_list_custom_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.list_custom_jobs ] = mock_object @@ -2658,12 +2643,7 @@ async def test_delete_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_custom_job ] = mock_object @@ -3041,12 +3021,7 @@ async def test_cancel_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_custom_job ] = mock_object @@ -3448,12 +3423,7 @@ async def test_create_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_data_labeling_job ] = mock_object @@ -3891,12 +3861,7 @@ async def test_get_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_data_labeling_job ] = mock_object @@ -4305,12 +4270,7 @@ async def test_list_data_labeling_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object 
= mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_data_labeling_jobs ] = mock_object @@ -4895,12 +4855,7 @@ async def test_delete_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_data_labeling_job ] = mock_object @@ -5282,12 +5237,7 @@ async def test_cancel_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_data_labeling_job ] = mock_object @@ -5681,12 +5631,7 @@ async def test_create_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_hyperparameter_tuning_job ] = mock_object @@ -6121,12 +6066,7 @@ async def test_get_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_hyperparameter_tuning_job ] = mock_object @@ -6528,12 +6468,7 @@ async def test_list_hyperparameter_tuning_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_hyperparameter_tuning_jobs ] = mock_object @@ -7124,12 +7059,7 @@ async def test_delete_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_hyperparameter_tuning_job ] = mock_object @@ -7511,12 +7441,7 @@ async def test_cancel_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_hyperparameter_tuning_job ] = mock_object @@ -7891,12 +7816,7 @@ async def test_create_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_nas_job ] = mock_object @@ -8281,12 +8201,7 @@ async def test_get_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_nas_job ] = mock_object @@ -8656,12 +8571,7 @@ async def 
test_list_nas_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_nas_jobs ] = mock_object @@ -9214,12 +9124,7 @@ async def test_delete_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_nas_job ] = mock_object @@ -9577,12 +9482,7 @@ async def test_cancel_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_nas_job ] = mock_object @@ -9952,12 +9852,7 @@ async def test_get_nas_trial_detail_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_nas_trial_detail ] = mock_object @@ -10348,12 +10243,7 @@ async def test_list_nas_trial_details_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_nas_trial_details ] = mock_object 
@@ -10959,12 +10849,7 @@ async def test_create_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_batch_prediction_job ] = mock_object @@ -11405,12 +11290,7 @@ async def test_get_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_batch_prediction_job ] = mock_object @@ -11816,12 +11696,7 @@ async def test_list_batch_prediction_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_batch_prediction_jobs ] = mock_object @@ -12410,12 +12285,7 @@ async def test_delete_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_batch_prediction_job ] = mock_object @@ -12797,12 +12667,7 @@ async def test_cancel_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + 
mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_batch_prediction_job ] = mock_object @@ -13207,12 +13072,7 @@ async def test_create_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_model_deployment_monitoring_job ] = mock_object @@ -13671,12 +13531,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_use_cach ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_model_deployment_monitoring_stats_anomalies ] = mock_object @@ -14335,12 +14190,7 @@ async def test_get_model_deployment_monitoring_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_deployment_monitoring_job ] = mock_object @@ -14757,12 +14607,7 @@ async def test_list_model_deployment_monitoring_jobs_async_use_cached_wrapped_rp ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_deployment_monitoring_jobs ] = mock_object @@ -15349,12 +15194,7 @@ async def 
test_update_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_model_deployment_monitoring_job ] = mock_object @@ -15764,12 +15604,7 @@ async def test_delete_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model_deployment_monitoring_job ] = mock_object @@ -16151,12 +15986,7 @@ async def test_pause_model_deployment_monitoring_job_async_use_cached_wrapped_rp ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.pause_model_deployment_monitoring_job ] = mock_object @@ -16528,12 +16358,7 @@ async def test_resume_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.resume_model_deployment_monitoring_job ] = mock_object @@ -16793,6 +16618,7 @@ def test_create_custom_job_rest(request_type): "scheduling": { "timeout": {"seconds": 751, "nanos": 543}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": 
"service_account_value", @@ -17033,7 +16859,7 @@ def test_create_custom_job_rest_required_fields( response = client.create_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17349,7 +17175,7 @@ def test_get_custom_job_rest_required_fields( response = client.get_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17662,7 +17488,7 @@ def test_list_custom_jobs_rest_required_fields( response = client.list_custom_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18029,7 +17855,7 @@ def test_delete_custom_job_rest_required_fields( response = client.delete_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18328,7 +18154,7 @@ def test_cancel_custom_job_rest_required_fields( response = client.cancel_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18763,7 +18589,7 @@ def test_create_data_labeling_job_rest_required_fields( response = client.create_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19099,7 +18925,7 @@ def test_get_data_labeling_job_rest_required_fields( response = client.get_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] 
assert expected_params == actual_params @@ -19421,7 +19247,7 @@ def test_list_data_labeling_jobs_rest_required_fields( response = client.list_data_labeling_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19795,7 +19621,7 @@ def test_delete_data_labeling_job_rest_required_fields( response = client.delete_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20102,7 +19928,7 @@ def test_cancel_data_labeling_job_rest_required_fields( response = client.cancel_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20386,6 +20212,7 @@ def test_create_hyperparameter_tuning_job_rest(request_type): "scheduling": { "timeout": {}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": "service_account_value", @@ -20676,7 +20503,7 @@ def test_create_hyperparameter_tuning_job_rest_required_fields( response = client.create_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21021,7 +20848,7 @@ def test_get_hyperparameter_tuning_job_rest_required_fields( response = client.get_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21350,7 +21177,7 @@ def test_list_hyperparameter_tuning_jobs_rest_required_fields( response = client.list_hyperparameter_tuning_jobs(request) - expected_params = [] + expected_params = 
[("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21734,7 +21561,7 @@ def test_delete_hyperparameter_tuning_job_rest_required_fields( response = client.delete_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22044,7 +21871,7 @@ def test_cancel_hyperparameter_tuning_job_rest_required_fields( response = client.cancel_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22267,6 +22094,7 @@ def test_create_nas_job_rest(request_type): "scheduling": { "timeout": {"seconds": 751, "nanos": 543}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": "service_account_value", @@ -22542,7 +22370,7 @@ def test_create_nas_job_rest_required_fields( response = client.create_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22856,7 +22684,7 @@ def test_get_nas_job_rest_required_fields(request_type=job_service.GetNasJobRequ response = client.get_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23163,7 +22991,7 @@ def test_list_nas_jobs_rest_required_fields( response = client.list_nas_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23524,7 +23352,7 @@ def test_delete_nas_job_rest_required_fields( response = client.delete_nas_job(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23819,7 +23647,7 @@ def test_cancel_nas_job_rest_required_fields( response = client.cancel_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24120,7 +23948,7 @@ def test_get_nas_trial_detail_rest_required_fields( response = client.get_nas_trial_detail(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24439,7 +24267,7 @@ def test_list_nas_trial_details_rest_required_fields( response = client.list_nas_trial_details(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25037,7 +24865,7 @@ def test_create_batch_prediction_job_rest_required_fields( response = client.create_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25375,7 +25203,7 @@ def test_get_batch_prediction_job_rest_required_fields( response = client.get_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25696,7 +25524,7 @@ def test_list_batch_prediction_jobs_rest_required_fields( response = client.list_batch_prediction_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26071,7 +25899,7 @@ def 
test_delete_batch_prediction_job_rest_required_fields( response = client.delete_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26378,7 +26206,7 @@ def test_cancel_batch_prediction_job_rest_required_fields( response = client.cancel_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26884,7 +26712,7 @@ def test_create_model_deployment_monitoring_job_rest_required_fields( response = client.create_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -27253,7 +27081,7 @@ def test_search_model_deployment_monitoring_stats_anomalies_rest_required_fields request ) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -27697,7 +27525,7 @@ def test_get_model_deployment_monitoring_job_rest_required_fields( response = client.get_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -28037,7 +27865,7 @@ def test_list_model_deployment_monitoring_jobs_rest_required_fields( response = client.list_model_deployment_monitoring_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -28603,7 +28431,7 @@ def test_update_model_deployment_monitoring_job_rest_required_fields( response = client.update_model_deployment_monitoring_job(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -28942,7 +28770,7 @@ def test_delete_model_deployment_monitoring_job_rest_required_fields( response = client.delete_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -29260,7 +29088,7 @@ def test_pause_model_deployment_monitoring_job_rest_required_fields( response = client.pause_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -29567,7 +29395,7 @@ def test_resume_model_deployment_monitoring_job_rest_required_fields( response = client.resume_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py index 3cee198f02..af206d66b6 100644 --- a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py @@ -54,6 +54,7 @@ from google.cloud.aiplatform_v1.services.llm_utility_service import transports from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import llm_utility_service +from google.cloud.aiplatform_v1.types import openapi from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.aiplatform_v1.types import tool from google.cloud.location import locations_pb2 @@ -1339,12 +1340,7 @@ async def test_count_tokens_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with 
mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.count_tokens ] = mock_object @@ -1717,12 +1713,7 @@ async def test_compute_tokens_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.compute_tokens ] = mock_object @@ -2015,7 +2006,6 @@ def test_count_tokens_rest_required_fields( request_init = {} request_init["endpoint"] = "" - request_init["model"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -2032,7 +2022,6 @@ def test_count_tokens_rest_required_fields( # verify required fields with default values are now present jsonified_request["endpoint"] = "endpoint_value" - jsonified_request["model"] = "model_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() @@ -2042,8 +2031,6 @@ def test_count_tokens_rest_required_fields( # verify required fields with non-default values are left alone assert "endpoint" in jsonified_request assert jsonified_request["endpoint"] == "endpoint_value" - assert "model" in jsonified_request - assert jsonified_request["model"] == "model_value" client = LlmUtilityServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2082,7 +2069,7 @@ def test_count_tokens_rest_required_fields( response = client.count_tokens(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2093,17 +2080,7 @@ def test_count_tokens_rest_unset_required_fields(): ) unset_fields = 
transport.count_tokens._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "endpoint", - "model", - "instances", - "contents", - ) - ) - ) + assert set(unset_fields) == (set(()) & set(("endpoint",))) @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -2397,7 +2374,7 @@ def test_compute_tokens_rest_required_fields( response = client.compute_tokens(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_match_service.py b/tests/unit/gapic/aiplatform_v1/test_match_service.py index 3a9a4ac206..f4acd34ac0 100644 --- a/tests/unit/gapic/aiplatform_v1/test_match_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_match_service.py @@ -1262,12 +1262,7 @@ async def test_find_neighbors_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.find_neighbors ] = mock_object @@ -1556,12 +1551,7 @@ async def test_read_index_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_index_datapoints ] = mock_object @@ -1828,7 +1818,7 @@ def test_find_neighbors_rest_required_fields( response = client.find_neighbors(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2081,7 +2071,7 @@ def 
test_read_index_datapoints_rest_required_fields( response = client.read_index_datapoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py index c0858993b9..174a643e03 100644 --- a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py @@ -1350,12 +1350,7 @@ async def test_create_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_metadata_store ] = mock_object @@ -1766,12 +1761,7 @@ async def test_get_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_metadata_store ] = mock_object @@ -2162,12 +2152,7 @@ async def test_list_metadata_stores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_metadata_stores ] = mock_object @@ -2752,12 +2737,7 @@ async def test_delete_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - 
self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_metadata_store ] = mock_object @@ -3156,12 +3136,7 @@ async def test_create_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_artifact ] = mock_object @@ -3580,12 +3555,7 @@ async def test_get_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_artifact ] = mock_object @@ -3965,12 +3935,7 @@ async def test_list_artifacts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_artifacts ] = mock_object @@ -4541,12 +4506,7 @@ async def test_update_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_artifact ] = mock_object @@ -4935,12 +4895,7 @@ async def test_delete_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_artifact ] = mock_object @@ -5306,12 +5261,7 @@ async def test_purge_artifacts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_artifacts ] = mock_object @@ -5696,12 +5646,7 @@ async def test_create_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_context ] = mock_object @@ -6111,12 +6056,7 @@ async def test_get_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_context ] = mock_object @@ -6494,12 +6434,7 @@ async def test_list_contexts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_contexts ] = mock_object @@ -7067,12 +7002,7 @@ async def test_update_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_context ] = mock_object @@ -7455,12 +7385,7 @@ async def test_delete_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_context ] = mock_object @@ -7826,12 +7751,7 @@ async def test_purge_contexts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_contexts ] = mock_object @@ -8206,12 +8126,7 @@ async def test_add_context_artifacts_and_executions_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_context_artifacts_and_executions ] = mock_object @@ -8612,12 +8527,7 @@ async def test_add_context_children_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_context_children ] = mock_object @@ -9007,12 +8917,7 @@ async def test_remove_context_children_async_use_cached_wrapped_rpc( ) # Replace cached 
wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.remove_context_children ] = mock_object @@ -9402,12 +9307,7 @@ async def test_query_context_lineage_subgraph_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_context_lineage_subgraph ] = mock_object @@ -9801,12 +9701,7 @@ async def test_create_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_execution ] = mock_object @@ -10221,12 +10116,7 @@ async def test_get_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_execution ] = mock_object @@ -10604,12 +10494,7 @@ async def test_list_executions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_executions ] = mock_object @@ -11179,12 +11064,7 @@ async def 
test_update_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_execution ] = mock_object @@ -11574,12 +11454,7 @@ async def test_delete_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_execution ] = mock_object @@ -11948,12 +11823,7 @@ async def test_purge_executions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_executions ] = mock_object @@ -12326,12 +12196,7 @@ async def test_add_execution_events_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_execution_events ] = mock_object @@ -12721,12 +12586,7 @@ async def test_query_execution_inputs_and_outputs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.query_execution_inputs_and_outputs ] = mock_object @@ -13128,12 +12988,7 @@ async def test_create_metadata_schema_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_metadata_schema ] = mock_object @@ -13566,12 +13421,7 @@ async def test_get_metadata_schema_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_metadata_schema ] = mock_object @@ -13974,12 +13824,7 @@ async def test_list_metadata_schemas_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_metadata_schemas ] = mock_object @@ -14562,12 +14407,7 @@ async def test_query_artifact_lineage_subgraph_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_artifact_lineage_subgraph ] = mock_object @@ -15002,7 +14842,7 @@ def test_create_metadata_store_rest_required_fields( response = client.create_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15323,7 +15163,7 @@ def test_get_metadata_store_rest_required_fields( response = client.get_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15639,7 +15479,7 @@ def test_list_metadata_stores_rest_required_fields( response = client.list_metadata_stores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16010,7 +15850,7 @@ def test_delete_metadata_store_rest_required_fields( response = client.delete_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16415,7 +16255,7 @@ def test_create_artifact_rest_required_fields( response = client.create_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16750,7 +16590,7 @@ def test_get_artifact_rest_required_fields( response = client.get_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17066,7 +16906,7 @@ def test_list_artifacts_rest_required_fields( response = client.list_artifacts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17544,7 +17384,7 @@ def test_update_artifact_rest_required_fields( response = client.update_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17865,7 +17705,7 @@ def test_delete_artifact_rest_required_fields( response = client.delete_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18175,7 +18015,7 @@ def test_purge_artifacts_rest_required_fields( response = client.purge_artifacts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18587,7 +18427,7 @@ def test_create_context_rest_required_fields( response = client.create_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18918,7 +18758,7 @@ def test_get_context_rest_required_fields( response = client.get_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19234,7 +19074,7 @@ def test_list_contexts_rest_required_fields( response = client.list_contexts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19709,7 +19549,7 @@ def test_update_context_rest_required_fields( response = client.update_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20033,7 +19873,7 @@ def test_delete_context_rest_required_fields( response = client.delete_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert 
expected_params == actual_params @@ -20351,7 +20191,7 @@ def test_purge_contexts_rest_required_fields( response = client.purge_contexts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20677,7 +20517,7 @@ def test_add_context_artifacts_and_executions_rest_required_fields( response = client.add_context_artifacts_and_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21003,7 +20843,7 @@ def test_add_context_children_rest_required_fields( response = client.add_context_children(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21319,7 +21159,7 @@ def test_remove_context_children_rest_required_fields( response = client.remove_context_children(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21634,7 +21474,7 @@ def test_query_context_lineage_subgraph_rest_required_fields( response = client.query_context_lineage_subgraph(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22043,7 +21883,7 @@ def test_create_execution_rest_required_fields( response = client.create_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22376,7 +22216,7 @@ def test_get_execution_rest_required_fields( response = client.get_execution(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22692,7 +22532,7 @@ def test_list_executions_rest_required_fields( response = client.list_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23169,7 +23009,7 @@ def test_update_execution_rest_required_fields( response = client.update_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23492,7 +23332,7 @@ def test_delete_execution_rest_required_fields( response = client.delete_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23804,7 +23644,7 @@ def test_purge_executions_rest_required_fields( response = client.purge_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24123,7 +23963,7 @@ def test_add_execution_events_rest_required_fields( response = client.add_execution_events(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24436,7 +24276,7 @@ def test_query_execution_inputs_and_outputs_rest_required_fields( response = client.query_execution_inputs_and_outputs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24848,7 +24688,7 @@ def test_create_metadata_schema_rest_required_fields( response = client.create_metadata_schema(request) - expected_params 
= [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25184,7 +25024,7 @@ def test_get_metadata_schema_rest_required_fields( response = client.get_metadata_schema(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25506,7 +25346,7 @@ def test_list_metadata_schemas_rest_required_fields( response = client.list_metadata_schemas(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25894,7 +25734,7 @@ def test_query_artifact_lineage_subgraph_rest_required_fields( response = client.query_artifact_lineage_subgraph(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index e4729026e7..4b3143f2c0 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1342,12 +1342,7 @@ async def test_search_migratable_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_migratable_resources ] = mock_object @@ -1936,12 +1931,7 @@ async def test_batch_migrate_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return 
iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_migrate_resources ] = mock_object @@ -2354,7 +2344,7 @@ def test_search_migratable_resources_rest_required_fields( response = client.search_migratable_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2724,7 +2714,7 @@ def test_batch_migrate_resources_rest_required_fields( response = client.batch_migrate_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3533,19 +3523,22 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -3555,22 +3548,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "squid" + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, 
dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py index 7391745d33..8475441c28 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py @@ -1375,12 +1375,7 @@ async def test_get_publisher_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_publisher_model ] = mock_object @@ -1741,6 +1736,7 @@ def test_get_publisher_model_rest_required_fields( # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( ( + "is_hugging_face_model", "language_code", "view", ) @@ -1787,7 +1783,7 @@ def test_get_publisher_model_rest_required_fields( response = client.get_publisher_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -1801,6 +1797,7 @@ def test_get_publisher_model_rest_unset_required_fields(): assert set(unset_fields) == ( set( ( + "isHuggingFaceModel", "languageCode", "view", ) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index d8502d9768..2909a6890e 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -1293,12 +1293,7 @@ async def test_upload_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upload_model ] = mock_object @@ -1733,12 +1728,7 @@ async def test_get_model_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model ] = mock_object @@ -2148,12 +2138,7 @@ async def test_list_models_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.list_models ] = mock_object @@ -2725,12 +2710,7 @@ async def test_list_model_versions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_versions ] = mock_object @@ -3360,12 +3340,7 @@ async def test_update_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_model ] = mock_object @@ -3791,12 +3766,7 @@ async def test_update_explanation_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_explanation_dataset ] = mock_object @@ -4171,12 +4141,7 @@ async def test_delete_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model ] = mock_object @@ -4552,12 +4517,7 @@ async def test_delete_model_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.delete_model_version ] = mock_object @@ -5008,12 +4968,7 @@ async def test_merge_version_aliases_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.merge_version_aliases ] = mock_object @@ -5437,12 +5392,7 @@ async def test_export_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_model ] = mock_object @@ -5832,12 +5782,7 @@ async def test_copy_model_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.copy_model ] = mock_object @@ -6240,12 +6185,7 @@ async def test_import_model_evaluation_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_model_evaluation ] = mock_object @@ -6657,12 +6597,7 @@ async def test_batch_import_model_evaluation_slices_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_import_model_evaluation_slices ] = mock_object @@ -7072,12 +7007,7 @@ async def test_batch_import_evaluated_annotations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_import_evaluated_annotations ] = mock_object @@ -7513,12 +7443,7 @@ async def test_get_model_evaluation_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_evaluation ] = mock_object @@ -7920,12 +7845,7 @@ async def test_list_model_evaluations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_evaluations ] = mock_object @@ -8514,12 +8434,7 @@ async def test_get_model_evaluation_slice_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_evaluation_slice ] = mock_object @@ -8913,12 +8828,7 @@ async def test_list_model_evaluation_slices_async_use_cached_wrapped_rpc( 
) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_evaluation_slices ] = mock_object @@ -9474,7 +9384,7 @@ def test_upload_model_rest_required_fields( response = client.upload_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9825,7 +9735,7 @@ def test_get_model_rest_required_fields(request_type=model_service.GetModelReque response = client.get_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10131,7 +10041,7 @@ def test_list_models_rest_required_fields(request_type=model_service.ListModelsR response = client.list_models(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10513,7 +10423,7 @@ def test_list_model_versions_rest_required_fields( response = client.list_model_versions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11112,7 +11022,7 @@ def test_update_model_rest_required_fields( response = client.update_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11429,7 +11339,7 @@ def test_update_explanation_dataset_rest_required_fields( response = client.update_explanation_dataset(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11728,7 +11638,7 @@ def test_delete_model_rest_required_fields( response = client.delete_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12030,7 +11940,7 @@ def test_delete_model_version_rest_required_fields( response = client.delete_model_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12383,7 +12293,7 @@ def test_merge_version_aliases_rest_required_fields( response = client.merge_version_aliases(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12691,7 +12601,7 @@ def test_export_model_rest_required_fields( response = client.export_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13007,7 +12917,7 @@ def test_copy_model_rest_required_fields(request_type=model_service.CopyModelReq response = client.copy_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13334,7 +13244,7 @@ def test_import_model_evaluation_rest_required_fields( response = client.import_model_evaluation(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13661,7 +13571,7 @@ def test_batch_import_model_evaluation_slices_rest_required_fields( response = 
client.batch_import_model_evaluation_slices(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14005,7 +13915,7 @@ def test_batch_import_evaluated_annotations_rest_required_fields( response = client.batch_import_evaluated_annotations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14356,7 +14266,7 @@ def test_get_model_evaluation_rest_required_fields( response = client.get_model_evaluation(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14677,7 +14587,7 @@ def test_list_model_evaluations_rest_required_fields( response = client.list_model_evaluations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15056,7 +14966,7 @@ def test_get_model_evaluation_slice_rest_required_fields( response = client.get_model_evaluation_slice(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15381,7 +15291,7 @@ def test_list_model_evaluation_slices_rest_required_fields( response = client.list_model_evaluation_slices(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py index ae6d31eb2c..ef708931cf 100644 --- a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py +++ 
b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py @@ -57,9 +57,14 @@ from google.cloud.aiplatform_v1.services.notebook_service import transports from google.cloud.aiplatform_v1.types import accelerator_type from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_execution_job +from google.cloud.aiplatform_v1.types import ( + notebook_execution_job as gca_notebook_execution_job, +) from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config from google.cloud.aiplatform_v1.types import notebook_runtime from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime @@ -72,10 +77,12 @@ from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore import google.auth @@ -1346,12 +1353,7 @@ async def test_create_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_notebook_runtime_template ] = mock_object @@ -1792,12 +1794,7 @@ async def test_get_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped 
function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_notebook_runtime_template ] = mock_object @@ -2208,12 +2205,7 @@ async def test_list_notebook_runtime_templates_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_notebook_runtime_templates ] = mock_object @@ -2802,12 +2794,7 @@ async def test_delete_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_notebook_runtime_template ] = mock_object @@ -3216,12 +3203,7 @@ async def test_update_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_notebook_runtime_template ] = mock_object @@ -3647,12 +3629,7 @@ async def test_assign_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.assign_notebook_runtime ] = mock_object @@ -4114,12 +4091,7 @@ async def test_get_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_notebook_runtime ] = mock_object @@ -4544,12 +4516,7 @@ async def test_list_notebook_runtimes_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_notebook_runtimes ] = mock_object @@ -5134,12 +5101,7 @@ async def test_delete_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_notebook_runtime ] = mock_object @@ -5527,12 +5489,7 @@ async def test_upgrade_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upgrade_notebook_runtime ] = mock_object @@ -5920,12 +5877,7 @@ async def test_start_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.start_notebook_runtime ] = mock_object @@ -6140,153 +6092,98 @@ async def test_start_notebook_runtime_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - notebook_service.CreateNotebookRuntimeTemplateRequest, + notebook_service.CreateNotebookExecutionJobRequest, dict, ], ) -def test_create_notebook_runtime_template_rest(request_type): +def test_create_notebook_execution_job(request_type, transport: str = "grpc"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} - request_init["notebook_runtime_template"] = { - "name": "name_value", - "display_name": "display_name_value", - "description": "description_value", - "is_default": True, - "machine_spec": { - "machine_type": "machine_type_value", - "accelerator_type": 1, - "accelerator_count": 1805, - "tpu_topology": "tpu_topology_value", - }, - "data_persistent_disk_spec": { - "disk_type": "disk_type_value", - "disk_size_gb": 1261, - }, - "network_spec": { - "enable_internet_access": True, - "network": "network_value", - "subnetwork": "subnetwork_value", - }, - "service_account": "service_account_value", - "etag": "etag_value", - "labels": {}, - "idle_shutdown_config": { - "idle_timeout": {"seconds": 751, "nanos": 543}, - "idle_shutdown_disabled": True, - }, - "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, - "create_time": {"seconds": 751, "nanos": 543}, - "update_time": {}, - "notebook_runtime_type": 1, - "shielded_vm_config": {"enable_secure_boot": True}, - "network_tags": ["network_tags_value1", "network_tags_value2"], - "encryption_spec": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used 
during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = notebook_service.CreateNotebookRuntimeTemplateRequest.meta.fields[ - "notebook_runtime_template" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_notebook_execution_job(request) - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookExecutionJobRequest() + assert args[0] == request - subfields_not_in_runtime = [] + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init[ - "notebook_runtime_template" - ].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) +def test_create_notebook_execution_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range( - 0, len(request_init["notebook_runtime_template"][field]) - ): - del request_init["notebook_runtime_template"][field][i][subfield] - else: - del request_init["notebook_runtime_template"][field][subfield] - request = request_type(**request_init) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookExecutionJobRequest() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) +def test_create_notebook_execution_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_notebook_runtime_template(request) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job_id="notebook_execution_job_id_value", + ) - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_notebook_execution_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookExecutionJobRequest( + parent="parent_value", + notebook_execution_job_id="notebook_execution_job_id_value", + ) -def test_create_notebook_runtime_template_rest_use_cached_wrapped_rpc(): +def test_create_notebook_execution_job_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -6295,7 +6192,7 @@ def test_create_notebook_runtime_template_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.create_notebook_runtime_template + client._transport.create_notebook_execution_job in client._transport._wrapped_methods ) @@ -6305,11 +6202,10 @@ def test_create_notebook_runtime_template_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.create_notebook_runtime_template + client._transport.create_notebook_execution_job ] = mock_rpc - request = {} - client.create_notebook_runtime_template(request) + client.create_notebook_execution_job(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -6318,56 +6214,3442 @@ def test_create_notebook_runtime_template_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.create_notebook_runtime_template(request) + client.create_notebook_execution_job(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_notebook_runtime_template_rest_required_fields( - request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, -): - transport_class = transports.NotebookServiceRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.asyncio +async def test_create_notebook_execution_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) - # verify fields with default values are dropped + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookExecutionJobRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +@pytest.mark.asyncio +async def test_create_notebook_execution_job_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - jsonified_request["parent"] = "parent_value" + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("notebook_runtime_template_id",)) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert ( + client._client._transport.create_notebook_execution_job + in client._client._transport._wrapped_methods + ) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Replace cached wrapped function with mock + mock_object = mock.AsyncMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_notebook_execution_job + ] = mock_object - client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + request = {} + await client.create_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_notebook_execution_job(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_notebook_execution_job_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.CreateNotebookExecutionJobRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookExecutionJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_notebook_execution_job_async_from_dict(): + await test_create_notebook_execution_job_async(request_type=dict) + + +def test_create_notebook_execution_job_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.CreateNotebookExecutionJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_notebook_execution_job_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.CreateNotebookExecutionJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_notebook_execution_job_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_notebook_execution_job( + parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_execution_job + mock_val = gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ) + assert arg == mock_val + arg = args[0].notebook_execution_job_id + mock_val = "notebook_execution_job_id_value" + assert arg == mock_val + + +def test_create_notebook_execution_job_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_notebook_execution_job( + notebook_service.CreateNotebookExecutionJobRequest(), + parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_notebook_execution_job_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_notebook_execution_job( + parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_execution_job + mock_val = gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ) + assert arg == mock_val + arg = args[0].notebook_execution_job_id + mock_val = "notebook_execution_job_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_notebook_execution_job_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_notebook_execution_job( + notebook_service.CreateNotebookExecutionJobRequest(), + parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookExecutionJobRequest, + dict, + ], +) +def test_get_notebook_execution_job(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_execution_job.NotebookExecutionJob( + name="name_value", + display_name="display_name_value", + schedule_resource_name="schedule_resource_name_value", + job_state=job_state.JobState.JOB_STATE_QUEUED, + notebook_runtime_template_resource_name="notebook_runtime_template_resource_name_value", + gcs_output_uri="gcs_output_uri_value", + execution_user="execution_user_value", + ) + response = client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookExecutionJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notebook_execution_job.NotebookExecutionJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.schedule_resource_name == "schedule_resource_name_value" + assert response.job_state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_notebook_execution_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookExecutionJobRequest() + + +def test_get_notebook_execution_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.GetNotebookExecutionJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_notebook_execution_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookExecutionJobRequest( + name="name_value", + ) + + +def test_get_notebook_execution_job_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_notebook_execution_job + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_notebook_execution_job + ] = mock_rpc + request = {} + client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_notebook_execution_job(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_execution_job.NotebookExecutionJob( + name="name_value", + display_name="display_name_value", + schedule_resource_name="schedule_resource_name_value", + job_state=job_state.JobState.JOB_STATE_QUEUED, + ) + ) + response = await client.get_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookExecutionJobRequest() + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_notebook_execution_job + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_object = mock.AsyncMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_notebook_execution_job + ] = mock_object + + request = {} + await client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_object.call_count == 1 + + await client.get_notebook_execution_job(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.GetNotebookExecutionJobRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_execution_job.NotebookExecutionJob( + name="name_value", + display_name="display_name_value", + schedule_resource_name="schedule_resource_name_value", + job_state=job_state.JobState.JOB_STATE_QUEUED, + ) + ) + response = await client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookExecutionJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notebook_execution_job.NotebookExecutionJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.schedule_resource_name == "schedule_resource_name_value" + assert response.job_state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_async_from_dict(): + await test_get_notebook_execution_job_async(request_type=dict) + + +def test_get_notebook_execution_job_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookExecutionJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + call.return_value = notebook_execution_job.NotebookExecutionJob() + client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookExecutionJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_execution_job.NotebookExecutionJob() + ) + await client.get_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_notebook_execution_job_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_execution_job.NotebookExecutionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notebook_execution_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_notebook_execution_job_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_notebook_execution_job( + notebook_service.GetNotebookExecutionJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_execution_job.NotebookExecutionJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_execution_job.NotebookExecutionJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notebook_execution_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_notebook_execution_job_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_notebook_execution_job( + notebook_service.GetNotebookExecutionJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookExecutionJobsRequest, + dict, + ], +) +def test_list_notebook_execution_jobs(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookExecutionJobsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookExecutionJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookExecutionJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_execution_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_notebook_execution_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookExecutionJobsRequest() + + +def test_list_notebook_execution_jobs_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.ListNotebookExecutionJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_notebook_execution_jobs(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookExecutionJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +def test_list_notebook_execution_jobs_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_notebook_execution_jobs + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_notebook_execution_jobs + ] = mock_rpc + request = {} + client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_notebook_execution_jobs(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookExecutionJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_execution_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookExecutionJobsRequest() + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_notebook_execution_jobs + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_object = mock.AsyncMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_notebook_execution_jobs + ] = mock_object + + request = {} + await client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_object.call_count == 1 + + await client.list_notebook_execution_jobs(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.ListNotebookExecutionJobsRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookExecutionJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookExecutionJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookExecutionJobsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_async_from_dict(): + await test_list_notebook_execution_jobs_async(request_type=dict) + + +def test_list_notebook_execution_jobs_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = notebook_service.ListNotebookExecutionJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + call.return_value = notebook_service.ListNotebookExecutionJobsResponse() + client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookExecutionJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookExecutionJobsResponse() + ) + await client.list_notebook_execution_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_notebook_execution_jobs_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookExecutionJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notebook_execution_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_notebook_execution_jobs_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_execution_jobs( + notebook_service.ListNotebookExecutionJobsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notebook_service.ListNotebookExecutionJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookExecutionJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notebook_execution_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_notebook_execution_jobs( + notebook_service.ListNotebookExecutionJobsRequest(), + parent="parent_value", + ) + + +def test_list_notebook_execution_jobs_pager(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[], + next_page_token="def", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_notebook_execution_jobs(request={}) + + assert pager._metadata == expected_metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_execution_job.NotebookExecutionJob) for i in results + ) + + +def test_list_notebook_execution_jobs_pages(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[], + next_page_token="def", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notebook_execution_jobs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_async_pager(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[], + next_page_token="def", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notebook_execution_jobs( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, notebook_execution_job.NotebookExecutionJob) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_notebook_execution_jobs_async_pages(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_execution_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[], + next_page_token="def", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_notebook_execution_jobs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookExecutionJobRequest, + dict, + ], +) +def test_delete_notebook_execution_job(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookExecutionJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_notebook_execution_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookExecutionJobRequest() + + +def test_delete_notebook_execution_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_notebook_execution_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookExecutionJobRequest( + name="name_value", + ) + + +def test_delete_notebook_execution_job_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_notebook_execution_job + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_notebook_execution_job + ] = mock_rpc + request = {} + client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_notebook_execution_job(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_execution_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookExecutionJobRequest() + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_notebook_execution_job + in client._client._transport._wrapped_methods + ) + + # 
Replace cached wrapped function with mock + mock_object = mock.AsyncMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_notebook_execution_job + ] = mock_object + + request = {} + await client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_notebook_execution_job(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.DeleteNotebookExecutionJobRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookExecutionJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_async_from_dict(): + await test_delete_notebook_execution_job_async(request_type=dict) + + +def test_delete_notebook_execution_job_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookExecutionJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookExecutionJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_notebook_execution_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_notebook_execution_job_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notebook_execution_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_notebook_execution_job_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notebook_execution_job( + notebook_service.DeleteNotebookExecutionJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_execution_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notebook_execution_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_notebook_execution_job_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notebook_execution_job( + notebook_service.DeleteNotebookExecutionJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_create_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["notebook_runtime_template"] = { + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "is_default": True, + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "data_persistent_disk_spec": { + "disk_type": "disk_type_value", + "disk_size_gb": 1261, + }, + "network_spec": { + "enable_internet_access": True, + "network": "network_value", + "subnetwork": "subnetwork_value", + }, + "service_account": "service_account_value", + "etag": "etag_value", + "labels": {}, + "idle_shutdown_config": { + "idle_timeout": {"seconds": 751, "nanos": 543}, + "idle_shutdown_disabled": True, + }, + "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "notebook_runtime_type": 1, + "shielded_vm_config": {"enable_secure_boot": True}, + "network_tags": ["network_tags_value1", "network_tags_value2"], + "encryption_spec": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = notebook_service.CreateNotebookRuntimeTemplateRequest.meta.fields[ + "notebook_runtime_template" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "notebook_runtime_template" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request 
which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["notebook_runtime_template"][field]) + ): + del request_init["notebook_runtime_template"][field][i][subfield] + else: + del request_init["notebook_runtime_template"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_notebook_runtime_template_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_notebook_runtime_template + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_notebook_runtime_template + ] = mock_rpc + + request = {} + client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_notebook_runtime_template(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("notebook_runtime_template_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_notebook_runtime_template(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.create_notebook_runtime_template._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set(("notebookRuntimeTemplateId",)) + & set( + ( + "parent", + "notebookRuntimeTemplate", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + 
client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_create_notebook_runtime_template", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_create_notebook_runtime_template", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( + notebook_service.CreateNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_notebook_runtime_template(request) + + +def test_create_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_create_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +def test_create_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_get_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_template_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_notebook_runtime_template + in 
client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_notebook_runtime_template + ] = mock_rpc + + request = {} + client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_notebook_runtime_template(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = 
request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_notebook_runtime_template(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_notebook_runtime_template._get_unset_required_fields( + {} + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if 
null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime_template" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime_template" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.GetNotebookRuntimeTemplateRequest.pb( + notebook_service.GetNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( + notebook_runtime.NotebookRuntimeTemplate() + ) + + request = notebook_service.GetNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_runtime.NotebookRuntimeTemplate() + + client.get_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a 
BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_notebook_runtime_template(request) + + +def test_get_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_get_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_get_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimeTemplatesRequest, + dict, + ], +) +def test_list_notebook_runtime_templates_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_notebook_runtime_templates(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtime_templates_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_notebook_runtime_templates + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_notebook_runtime_templates + ] = mock_rpc + + request = {} + client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_notebook_runtime_templates(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_notebook_runtime_templates_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_notebook_runtime_templates(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_notebook_runtime_templates_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_notebook_runtime_templates._get_unset_required_fields( + {} + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = 
NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_list_notebook_runtime_templates", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtime_templates" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( + notebook_service.ListNotebookRuntimeTemplatesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + ) + + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + client.list_notebook_runtime_templates( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_notebook_runtime_templates_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_notebook_runtime_templates(request) + + +def test_list_notebook_runtime_templates_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_notebook_runtime_templates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_list_notebook_runtime_templates_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtime_templates_rest_pager(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_runtime_templates(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results + ) + + pages = list( + client.list_notebook_runtime_templates(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeTemplateRequest, + dict, + ], +) +def 
test_delete_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_notebook_runtime_template_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_notebook_runtime_template + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.delete_notebook_runtime_template + ] = mock_rpc + + request = {} + client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_notebook_runtime_template(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned 
response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_notebook_runtime_template(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.delete_notebook_runtime_template._get_unset_required_fields({}) + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + 
type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_delete_notebook_runtime_template", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_delete_notebook_runtime_template", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( + notebook_service.DeleteNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_notebook_runtime_template(request) + + +def test_delete_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_delete_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.UpdateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_update_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "notebook_runtime_template": { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + } + request_init["notebook_runtime_template"] = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3", + "display_name": "display_name_value", + "description": "description_value", + "is_default": True, + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "data_persistent_disk_spec": { + "disk_type": "disk_type_value", + "disk_size_gb": 1261, + }, + "network_spec": { + "enable_internet_access": True, + "network": "network_value", + "subnetwork": "subnetwork_value", + }, + "service_account": "service_account_value", + 
"etag": "etag_value", + "labels": {}, + "idle_shutdown_config": { + "idle_timeout": {"seconds": 751, "nanos": 543}, + "idle_shutdown_disabled": True, + }, + "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "notebook_runtime_type": 1, + "shielded_vm_config": {"enable_secure_boot": True}, + "network_tags": ["network_tags_value1", "network_tags_value2"], + "encryption_spec": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = notebook_service.UpdateNotebookRuntimeTemplateRequest.meta.fields[ + "notebook_runtime_template" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "notebook_runtime_template" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["notebook_runtime_template"][field]) + ): + del 
request_init["notebook_runtime_template"][field][i][subfield] + else: + del request_init["notebook_runtime_template"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_update_notebook_runtime_template_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_notebook_runtime_template + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_notebook_runtime_template + ] = mock_rpc + + request = {} + client.update_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.update_notebook_runtime_template(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.UpdateNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_notebook_runtime_template._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_runtime.NotebookRuntimeTemplate() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6379,7 +9661,7 @@ def test_create_notebook_runtime_template_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "patch", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -6387,39 +9669,42 @@ def test_create_notebook_runtime_template_rest_required_fields( response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_notebook_runtime_template(request) + response = client.update_notebook_runtime_template(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_notebook_runtime_template_rest_unset_required_fields(): +def test_update_notebook_runtime_template_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) unset_fields = ( - transport.create_notebook_runtime_template._get_unset_required_fields({}) + transport.update_notebook_runtime_template._get_unset_required_fields({}) ) assert set(unset_fields) == ( - set(("notebookRuntimeTemplateId",)) + set(("updateMask",)) & set( ( - "parent", "notebookRuntimeTemplate", + "updateMask", ) ) ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): +def test_update_notebook_runtime_template_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( 
credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -6432,18 +9717,16 @@ def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( transports.NotebookServiceRestInterceptor, - "post_create_notebook_runtime_template", + "post_update_notebook_runtime_template", ) as post, mock.patch.object( transports.NotebookServiceRestInterceptor, - "pre_create_notebook_runtime_template", + "pre_update_notebook_runtime_template", ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( - notebook_service.CreateNotebookRuntimeTemplateRequest() + pb_message = notebook_service.UpdateNotebookRuntimeTemplateRequest.pb( + notebook_service.UpdateNotebookRuntimeTemplateRequest() ) transcode.return_value = { "method": "post", @@ -6455,19 +9738,19 @@ def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( + notebook_runtime.NotebookRuntimeTemplate() ) - request = notebook_service.CreateNotebookRuntimeTemplateRequest() + request = notebook_service.UpdateNotebookRuntimeTemplateRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = notebook_runtime.NotebookRuntimeTemplate() - client.create_notebook_runtime_template( + client.update_notebook_runtime_template( request, metadata=[ ("key", "val"), @@ -6479,9 +9762,9 @@ def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): 
post.assert_called_once() -def test_create_notebook_runtime_template_rest_bad_request( +def test_update_notebook_runtime_template_rest_bad_request( transport: str = "rest", - request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, + request_type=notebook_service.UpdateNotebookRuntimeTemplateRequest, ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6489,7 +9772,11 @@ def test_create_notebook_runtime_template_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "notebook_runtime_template": { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -6501,10 +9788,10 @@ def test_create_notebook_runtime_template_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.create_notebook_runtime_template(request) + client.update_notebook_runtime_template(request) -def test_create_notebook_runtime_template_rest_flattened(): +def test_update_notebook_runtime_template_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6513,42 +9800,47 @@ def test_create_notebook_runtime_template_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_runtime.NotebookRuntimeTemplate() # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + sample_request = { + "notebook_runtime_template": { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( name="name_value" ), - notebook_runtime_template_id="notebook_runtime_template_id_value", + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.create_notebook_runtime_template(**mock_args) + client.update_notebook_runtime_template(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + "%s/v1/{notebook_runtime_template.name=projects/*/locations/*/notebookRuntimeTemplates/*}" % client.transport._host, args[1], ) -def test_create_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): +def test_update_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6557,17 +9849,16 @@ def test_create_notebook_runtime_template_rest_flattened_error(transport: str = # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_notebook_runtime_template( - notebook_service.CreateNotebookRuntimeTemplateRequest(), - parent="parent_value", + client.update_notebook_runtime_template( + notebook_service.UpdateNotebookRuntimeTemplateRequest(), notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( name="name_value" ), - notebook_runtime_template_id="notebook_runtime_template_id_value", + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_create_notebook_runtime_template_rest_error(): +def test_update_notebook_runtime_template_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -6576,63 +9867,39 @@ def test_create_notebook_runtime_template_rest_error(): @pytest.mark.parametrize( "request_type", [ - notebook_service.GetNotebookRuntimeTemplateRequest, + notebook_service.AssignNotebookRuntimeRequest, dict, ], ) -def test_get_notebook_runtime_template_rest(request_type): +def test_assign_notebook_runtime_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = { - 
"name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate( - name="name_value", - display_name="display_name_value", - description="description_value", - is_default=True, - service_account="service_account_value", - etag="etag_value", - notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, - network_tags=["network_tags_value"], - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_notebook_runtime_template(request) + response = client.assign_notebook_runtime(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.description == "description_value" - assert response.is_default is True - assert response.service_account == "service_account_value" - assert response.etag == "etag_value" - assert ( - response.notebook_runtime_type - == notebook_runtime.NotebookRuntimeType.USER_DEFINED - ) - assert response.network_tags == ["network_tags_value"] + assert response.operation.name == "operations/spam" -def test_get_notebook_runtime_template_rest_use_cached_wrapped_rpc(): +def test_assign_notebook_runtime_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -6647,7 +9914,7 @@ def test_get_notebook_runtime_template_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_notebook_runtime_template + client._transport.assign_notebook_runtime in client._transport._wrapped_methods ) @@ -6657,29 +9924,34 @@ def test_get_notebook_runtime_template_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.get_notebook_runtime_template + client._transport.assign_notebook_runtime ] = mock_rpc request = {} - client.get_notebook_runtime_template(request) + client.assign_notebook_runtime(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.get_notebook_runtime_template(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.assign_notebook_runtime(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_notebook_runtime_template_rest_required_fields( - request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +def test_assign_notebook_runtime_rest_required_fields( + request_type=notebook_service.AssignNotebookRuntimeRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" + request_init["notebook_runtime_template"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -6690,21 +9962,27 @@ def test_get_notebook_runtime_template_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" + jsonified_request["notebookRuntimeTemplate"] = "notebook_runtime_template_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + 
assert jsonified_request["parent"] == "parent_value" + assert "notebookRuntimeTemplate" in jsonified_request + assert ( + jsonified_request["notebookRuntimeTemplate"] + == "notebook_runtime_template_value" + ) client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6713,7 +9991,7 @@ def test_get_notebook_runtime_template_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6725,41 +10003,46 @@ def test_get_notebook_runtime_template_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_notebook_runtime_template(request) + response = client.assign_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_notebook_runtime_template_rest_unset_required_fields(): +def test_assign_notebook_runtime_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = 
transport.get_notebook_runtime_template._get_unset_required_fields( - {} + unset_fields = transport.assign_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "notebookRuntimeTemplate", + "notebookRuntime", + ) + ) ) - assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): +def test_assign_notebook_runtime_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -6772,14 +10055,16 @@ def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime_template" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_assign_notebook_runtime" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime_template" + transports.NotebookServiceRestInterceptor, "pre_assign_notebook_runtime" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.GetNotebookRuntimeTemplateRequest.pb( - notebook_service.GetNotebookRuntimeTemplateRequest() + pb_message = notebook_service.AssignNotebookRuntimeRequest.pb( + notebook_service.AssignNotebookRuntimeRequest() ) transcode.return_value = { "method": "post", @@ -6791,19 +10076,19 @@ def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( - notebook_runtime.NotebookRuntimeTemplate() + req.return_value._content = 
json_format.MessageToJson( + operations_pb2.Operation() ) - request = notebook_service.GetNotebookRuntimeTemplateRequest() + request = notebook_service.AssignNotebookRuntimeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = notebook_runtime.NotebookRuntimeTemplate() + post.return_value = operations_pb2.Operation() - client.get_notebook_runtime_template( + client.assign_notebook_runtime( request, metadata=[ ("key", "val"), @@ -6815,9 +10100,8 @@ def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): post.assert_called_once() -def test_get_notebook_runtime_template_rest_bad_request( - transport: str = "rest", - request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +def test_assign_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.AssignNotebookRuntimeRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6825,9 +10109,7 @@ def test_get_notebook_runtime_template_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -6839,10 +10121,10 @@ def test_get_notebook_runtime_template_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.get_notebook_runtime_template(request) + client.assign_notebook_runtime(request) -def test_get_notebook_runtime_template_rest_flattened(): +def test_assign_notebook_runtime_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6851,42 +10133,41 @@ def test_get_notebook_runtime_template_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + sample_request = {"parent": "projects/sample1/locations/sample2"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_notebook_runtime_template(**mock_args) + client.assign_notebook_runtime(**mock_args) # Establish that the underlying call was made with the expected # 
request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes:assign" % client.transport._host, args[1], ) -def test_get_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): +def test_assign_notebook_runtime_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6895,13 +10176,16 @@ def test_get_notebook_runtime_template_rest_flattened_error(transport: str = "re # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_notebook_runtime_template( - notebook_service.GetNotebookRuntimeTemplateRequest(), - name="name_value", + client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", ) -def test_get_notebook_runtime_template_rest_error(): +def test_assign_notebook_runtime_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -6910,46 +10194,77 @@ def test_get_notebook_runtime_template_rest_error(): @pytest.mark.parametrize( "request_type", [ - notebook_service.ListNotebookRuntimeTemplatesRequest, + notebook_service.GetNotebookRuntimeRequest, dict, ], ) -def test_list_notebook_runtime_templates_rest(request_type): +def test_get_notebook_runtime_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + 
"name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( - next_page_token="next_page_token_value", + return_value = notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( - return_value - ) + return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_notebook_runtime_templates(request) + response = client.get_notebook_runtime(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True -def test_list_notebook_runtime_templates_rest_use_cached_wrapped_rpc(): +def test_get_notebook_runtime_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -6964,8 +10279,7 @@ def test_list_notebook_runtime_templates_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_notebook_runtime_templates - in client._transport._wrapped_methods + client._transport.get_notebook_runtime in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -6974,29 +10288,29 @@ def test_list_notebook_runtime_templates_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. 
) client._transport._wrapped_methods[ - client._transport.list_notebook_runtime_templates + client._transport.get_notebook_runtime ] = mock_rpc request = {} - client.list_notebook_runtime_templates(request) + client.get_notebook_runtime(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_notebook_runtime_templates(request) + client.get_notebook_runtime(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_notebook_runtime_templates_rest_required_fields( - request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +def test_get_notebook_runtime_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} - request_init["parent"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7007,31 +10321,21 @@ def test_list_notebook_runtime_templates_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - "read_mask", - ) - ) + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7040,7 +10344,7 @@ def test_list_notebook_runtime_templates_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + return_value = notebook_runtime.NotebookRuntime() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -7061,45 +10365,30 @@ def test_list_notebook_runtime_templates_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( - return_value - ) + return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_notebook_runtime_templates(request) + response = client.get_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_notebook_runtime_templates_rest_unset_required_fields(): +def test_get_notebook_runtime_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( 
credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_notebook_runtime_templates._get_unset_required_fields( - {} - ) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - "readMask", - ) - ) - & set(("parent",)) - ) + unset_fields = transport.get_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): +def test_get_notebook_runtime_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7112,15 +10401,14 @@ def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.NotebookServiceRestInterceptor, - "post_list_notebook_runtime_templates", + transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtime_templates" + transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( - notebook_service.ListNotebookRuntimeTemplatesRequest() + pb_message = notebook_service.GetNotebookRuntimeRequest.pb( + notebook_service.GetNotebookRuntimeRequest() ) transcode.return_value = { "method": "post", @@ -7132,21 +10420,19 @@ def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = ( - notebook_service.ListNotebookRuntimeTemplatesResponse.to_json( - 
notebook_service.ListNotebookRuntimeTemplatesResponse() - ) + req.return_value._content = notebook_runtime.NotebookRuntime.to_json( + notebook_runtime.NotebookRuntime() ) - request = notebook_service.ListNotebookRuntimeTemplatesRequest() + request = notebook_service.GetNotebookRuntimeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + post.return_value = notebook_runtime.NotebookRuntime() - client.list_notebook_runtime_templates( + client.get_notebook_runtime( request, metadata=[ ("key", "val"), @@ -7158,9 +10444,8 @@ def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): post.assert_called_once() -def test_list_notebook_runtime_templates_rest_bad_request( - transport: str = "rest", - request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +def test_get_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.GetNotebookRuntimeRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7168,7 +10453,9 @@ def test_list_notebook_runtime_templates_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -7180,10 +10467,10 @@ def test_list_notebook_runtime_templates_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.list_notebook_runtime_templates(request) + client.get_notebook_runtime(request) -def test_list_notebook_runtime_templates_rest_flattened(): +def test_get_notebook_runtime_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -7192,14 +10479,16 @@ def test_list_notebook_runtime_templates_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + return_value = notebook_runtime.NotebookRuntime() # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + name="name_value", ) mock_args.update(sample_request) @@ -7207,27 +10496,25 @@ def test_list_notebook_runtime_templates_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( - return_value - ) + return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.list_notebook_runtime_templates(**mock_args) + client.get_notebook_runtime(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" % client.transport._host, args[1], ) -def test_list_notebook_runtime_templates_rest_flattened_error(transport: str = "rest"): +def test_get_notebook_runtime_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7236,118 +10523,59 @@ def test_list_notebook_runtime_templates_rest_flattened_error(transport: str = " # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_notebook_runtime_templates( - notebook_service.ListNotebookRuntimeTemplatesRequest(), - parent="parent_value", + client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", ) -def test_list_notebook_runtime_templates_rest_pager(transport: str = "rest"): +def test_get_notebook_runtime_rest_error(): client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - notebook_service.ListNotebookRuntimeTemplatesResponse( - notebook_runtime_templates=[ - notebook_runtime.NotebookRuntimeTemplate(), - notebook_runtime.NotebookRuntimeTemplate(), - notebook_runtime.NotebookRuntimeTemplate(), - ], - next_page_token="abc", - ), - notebook_service.ListNotebookRuntimeTemplatesResponse( - notebook_runtime_templates=[], - next_page_token="def", - ), - notebook_service.ListNotebookRuntimeTemplatesResponse( - notebook_runtime_templates=[ - notebook_runtime.NotebookRuntimeTemplate(), - ], - next_page_token="ghi", - ), - notebook_service.ListNotebookRuntimeTemplatesResponse( - notebook_runtime_templates=[ - notebook_runtime.NotebookRuntimeTemplate(), - notebook_runtime.NotebookRuntimeTemplate(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - notebook_service.ListNotebookRuntimeTemplatesResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} - - pager = client.list_notebook_runtime_templates(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all( - isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results - ) - - pages = list( - client.list_notebook_runtime_templates(request=sample_request).pages - ) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - @pytest.mark.parametrize( "request_type", [ - notebook_service.DeleteNotebookRuntimeTemplateRequest, + notebook_service.ListNotebookRuntimesRequest, dict, ], ) -def 
test_delete_notebook_runtime_template_rest(request_type): +def test_list_notebook_runtimes_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_notebook_runtime_template(request) + response = client.list_notebook_runtimes(request) # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" + assert isinstance(response, pagers.ListNotebookRuntimesPager) + assert response.next_page_token == "next_page_token_value" -def test_delete_notebook_runtime_template_rest_use_cached_wrapped_rpc(): +def test_list_notebook_runtimes_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -7362,7 +10590,7 @@ def test_delete_notebook_runtime_template_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.delete_notebook_runtime_template + client._transport.list_notebook_runtimes in client._transport._wrapped_methods ) @@ -7372,33 +10600,29 @@ def test_delete_notebook_runtime_template_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.delete_notebook_runtime_template + client._transport.list_notebook_runtimes ] = mock_rpc request = {} - client.delete_notebook_runtime_template(request) + client.list_notebook_runtimes(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.delete_notebook_runtime_template(request) + client.list_notebook_runtimes(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_notebook_runtime_template_rest_required_fields( - request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +def test_list_notebook_runtimes_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimesRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7409,21 +10633,31 @@ def test_delete_notebook_runtime_template_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7432,7 +10666,7 @@ def test_delete_notebook_runtime_template_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookRuntimesResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -7444,38 +10678,52 @@ def test_delete_notebook_runtime_template_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_notebook_runtime_template(request) + response = client.list_notebook_runtimes(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_notebook_runtime_template_rest_unset_required_fields(): +def 
test_list_notebook_runtimes_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = ( - transport.delete_notebook_runtime_template._get_unset_required_fields({}) + unset_fields = transport.list_notebook_runtimes._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) ) - assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): +def test_list_notebook_runtimes_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7488,18 +10736,14 @@ def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.NotebookServiceRestInterceptor, - "post_delete_notebook_runtime_template", + transports.NotebookServiceRestInterceptor, "post_list_notebook_runtimes" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, - "pre_delete_notebook_runtime_template", + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtimes" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( - notebook_service.DeleteNotebookRuntimeTemplateRequest() + pb_message = notebook_service.ListNotebookRuntimesRequest.pb( + notebook_service.ListNotebookRuntimesRequest() ) transcode.return_value = { "method": "post", @@ -7511,19 +10755,21 @@ def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): req.return_value = Response() 
req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + req.return_value._content = ( + notebook_service.ListNotebookRuntimesResponse.to_json( + notebook_service.ListNotebookRuntimesResponse() + ) ) - request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + request = notebook_service.ListNotebookRuntimesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = notebook_service.ListNotebookRuntimesResponse() - client.delete_notebook_runtime_template( + client.list_notebook_runtimes( request, metadata=[ ("key", "val"), @@ -7535,9 +10781,8 @@ def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): post.assert_called_once() -def test_delete_notebook_runtime_template_rest_bad_request( - transport: str = "rest", - request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +def test_list_notebook_runtimes_rest_bad_request( + transport: str = "rest", request_type=notebook_service.ListNotebookRuntimesRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7545,9 +10790,7 @@ def test_delete_notebook_runtime_template_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -7559,10 +10802,10 @@ def test_delete_notebook_runtime_template_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.delete_notebook_runtime_template(request) + client.list_notebook_runtimes(request) -def test_delete_notebook_runtime_template_rest_flattened(): +def test_list_notebook_runtimes_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -7571,40 +10814,40 @@ def test_delete_notebook_runtime_template_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookRuntimesResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + sample_request = {"parent": "projects/sample1/locations/sample2"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_notebook_runtime_template(**mock_args) + client.list_notebook_runtimes(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes" % client.transport._host, args[1], ) -def test_delete_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): +def test_list_notebook_runtimes_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7613,188 +10856,113 @@ def test_delete_notebook_runtime_template_rest_flattened_error(transport: str = # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_notebook_runtime_template( - notebook_service.DeleteNotebookRuntimeTemplateRequest(), - name="name_value", + client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", ) -def test_delete_notebook_runtime_template_rest_error(): - client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - notebook_service.UpdateNotebookRuntimeTemplateRequest, - dict, - ], -) -def test_update_notebook_runtime_template_rest(request_type): +def test_list_notebook_runtimes_rest_pager(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = { - "notebook_runtime_template": { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } - } - request_init["notebook_runtime_template"] = { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3", - "display_name": "display_name_value", - "description": "description_value", - "is_default": True, - "machine_spec": { - 
"machine_type": "machine_type_value", - "accelerator_type": 1, - "accelerator_count": 1805, - "tpu_topology": "tpu_topology_value", - }, - "data_persistent_disk_spec": { - "disk_type": "disk_type_value", - "disk_size_gb": 1261, - }, - "network_spec": { - "enable_internet_access": True, - "network": "network_value", - "subnetwork": "subnetwork_value", - }, - "service_account": "service_account_value", - "etag": "etag_value", - "labels": {}, - "idle_shutdown_config": { - "idle_timeout": {"seconds": 751, "nanos": 543}, - "idle_shutdown_disabled": True, - }, - "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, - "create_time": {"seconds": 751, "nanos": 543}, - "update_time": {}, - "notebook_runtime_type": 1, - "shielded_vm_config": {"enable_secure_boot": True}, - "network_tags": ["network_tags_value1", "network_tags_value2"], - "encryption_spec": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = notebook_service.UpdateNotebookRuntimeTemplateRequest.meta.fields[ - "notebook_runtime_template" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + ) + # Two responses for two calls + response = response + response - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] + sample_request = {"parent": "projects/sample1/locations/sample2"} - subfields_not_in_runtime = [] + pager = client.list_notebook_runtimes(request=sample_request) - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at 
runtime - for field, value in request_init[ - "notebook_runtime_template" - ].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) + pages = list(client.list_notebook_runtimes(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range( - 0, len(request_init["notebook_runtime_template"][field]) - ): - del request_init["notebook_runtime_template"][field][i][subfield] - else: - del request_init["notebook_runtime_template"][field][subfield] + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeRequest, + dict, + ], +) +def test_delete_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": 
"projects/sample1/locations/sample2/notebookRuntimes/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate( - name="name_value", - display_name="display_name_value", - description="description_value", - is_default=True, - service_account="service_account_value", - etag="etag_value", - notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, - network_tags=["network_tags_value"], - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_notebook_runtime_template(request) + response = client.delete_notebook_runtime(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.description == "description_value" - assert response.is_default is True - assert response.service_account == "service_account_value" - assert response.etag == "etag_value" - assert ( - response.notebook_runtime_type - == notebook_runtime.NotebookRuntimeType.USER_DEFINED - ) - assert response.network_tags == ["network_tags_value"] + assert response.operation.name == "operations/spam" -def test_update_notebook_runtime_template_rest_use_cached_wrapped_rpc(): +def test_delete_notebook_runtime_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -7809,7 +10977,7 @@ def test_update_notebook_runtime_template_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.update_notebook_runtime_template + client._transport.delete_notebook_runtime in client._transport._wrapped_methods ) @@ -7819,28 +10987,33 @@ def test_update_notebook_runtime_template_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.update_notebook_runtime_template + client._transport.delete_notebook_runtime ] = mock_rpc request = {} - client.update_notebook_runtime_template(request) + client.delete_notebook_runtime(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.update_notebook_runtime_template(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_notebook_runtime(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_notebook_runtime_template_rest_required_fields( - request_type=notebook_service.UpdateNotebookRuntimeTemplateRequest, +def test_delete_notebook_runtime_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7851,19 +11024,21 @@ def test_update_notebook_runtime_template_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_notebook_runtime_template._get_unset_required_fields(jsonified_request) + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_notebook_runtime_template._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask",)) + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7872,7 +11047,7 @@ def test_update_notebook_runtime_template_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -7884,50 +11059,36 @@ def test_update_notebook_runtime_template_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_notebook_runtime_template(request) + response = client.delete_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_notebook_runtime_template_rest_unset_required_fields(): +def test_delete_notebook_runtime_rest_unset_required_fields(): transport = 
transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = ( - transport.update_notebook_runtime_template._get_unset_required_fields({}) - ) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "notebookRuntimeTemplate", - "updateMask", - ) - ) - ) + unset_fields = transport.delete_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_notebook_runtime_template_rest_interceptors(null_interceptor): +def test_delete_notebook_runtime_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7940,16 +11101,16 @@ def test_update_notebook_runtime_template_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.NotebookServiceRestInterceptor, - "post_update_notebook_runtime_template", + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_delete_notebook_runtime" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, - "pre_update_notebook_runtime_template", + transports.NotebookServiceRestInterceptor, "pre_delete_notebook_runtime" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.UpdateNotebookRuntimeTemplateRequest.pb( - notebook_service.UpdateNotebookRuntimeTemplateRequest() + pb_message = notebook_service.DeleteNotebookRuntimeRequest.pb( + notebook_service.DeleteNotebookRuntimeRequest() ) transcode.return_value = { "method": "post", @@ -7961,19 +11122,19 @@ def test_update_notebook_runtime_template_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - 
req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( - notebook_runtime.NotebookRuntimeTemplate() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() ) - request = notebook_service.UpdateNotebookRuntimeTemplateRequest() + request = notebook_service.DeleteNotebookRuntimeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = notebook_runtime.NotebookRuntimeTemplate() + post.return_value = operations_pb2.Operation() - client.update_notebook_runtime_template( + client.delete_notebook_runtime( request, metadata=[ ("key", "val"), @@ -7985,9 +11146,8 @@ def test_update_notebook_runtime_template_rest_interceptors(null_interceptor): post.assert_called_once() -def test_update_notebook_runtime_template_rest_bad_request( - transport: str = "rest", - request_type=notebook_service.UpdateNotebookRuntimeTemplateRequest, +def test_delete_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.DeleteNotebookRuntimeRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7996,9 +11156,7 @@ def test_update_notebook_runtime_template_rest_bad_request( # send a request that will satisfy transcoding request_init = { - "notebook_runtime_template": { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" } request = request_type(**request_init) @@ -8011,10 +11169,10 @@ def test_update_notebook_runtime_template_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.update_notebook_runtime_template(request) + client.delete_notebook_runtime(request) -def test_update_notebook_runtime_template_rest_flattened(): +def test_delete_notebook_runtime_rest_flattened(): client = NotebookServiceClient( 
credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8023,47 +11181,40 @@ def test_update_notebook_runtime_template_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntimeTemplate() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = { - "notebook_runtime_template": { - "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" - } + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" } # get truthy value for each flattened field mock_args = dict( - notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( - name="name_value" - ), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.update_notebook_runtime_template(**mock_args) + client.delete_notebook_runtime(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{notebook_runtime_template.name=projects/*/locations/*/notebookRuntimeTemplates/*}" + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" % client.transport._host, args[1], ) -def test_update_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): +def test_delete_notebook_runtime_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -8072,16 +11223,13 @@ def test_update_notebook_runtime_template_rest_flattened_error(transport: str = # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_notebook_runtime_template( - notebook_service.UpdateNotebookRuntimeTemplateRequest(), - notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( - name="name_value" - ), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", ) -def test_update_notebook_runtime_template_rest_error(): +def test_delete_notebook_runtime_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -8090,18 +11238,20 @@ def test_update_notebook_runtime_template_rest_error(): @pytest.mark.parametrize( "request_type", [ - notebook_service.AssignNotebookRuntimeRequest, + notebook_service.UpgradeNotebookRuntimeRequest, dict, ], ) -def test_assign_notebook_runtime_rest(request_type): +def test_upgrade_notebook_runtime_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "name": 
"projects/sample1/locations/sample2/notebookRuntimes/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -8116,13 +11266,13 @@ def test_assign_notebook_runtime_rest(request_type): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.assign_notebook_runtime(request) + response = client.upgrade_notebook_runtime(request) # Establish that the response is the type that we expect. assert response.operation.name == "operations/spam" -def test_assign_notebook_runtime_rest_use_cached_wrapped_rpc(): +def test_upgrade_notebook_runtime_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -8137,7 +11287,7 @@ def test_assign_notebook_runtime_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.assign_notebook_runtime + client._transport.upgrade_notebook_runtime in client._transport._wrapped_methods ) @@ -8147,11 +11297,11 @@ def test_assign_notebook_runtime_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.assign_notebook_runtime + client._transport.upgrade_notebook_runtime ] = mock_rpc request = {} - client.assign_notebook_runtime(request) + client.upgrade_notebook_runtime(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -8160,21 +11310,20 @@ def test_assign_notebook_runtime_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.assign_notebook_runtime(request) + client.upgrade_notebook_runtime(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_assign_notebook_runtime_rest_required_fields( - request_type=notebook_service.AssignNotebookRuntimeRequest, +def test_upgrade_notebook_runtime_rest_required_fields( + request_type=notebook_service.UpgradeNotebookRuntimeRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} - request_init["parent"] = "" - request_init["notebook_runtime_template"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -8185,27 +11334,21 @@ def test_assign_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["notebookRuntimeTemplate"] = "notebook_runtime_template_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "notebookRuntimeTemplate" in 
jsonified_request - assert ( - jsonified_request["notebookRuntimeTemplate"] - == "notebook_runtime_template_value" - ) + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8239,33 +11382,24 @@ def test_assign_notebook_runtime_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.assign_notebook_runtime(request) + response = client.upgrade_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_assign_notebook_runtime_rest_unset_required_fields(): +def test_upgrade_notebook_runtime_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.assign_notebook_runtime._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "notebookRuntimeTemplate", - "notebookRuntime", - ) - ) - ) + unset_fields = transport.upgrade_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_assign_notebook_runtime_rest_interceptors(null_interceptor): +def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8280,14 +11414,14 @@ def test_assign_notebook_runtime_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_assign_notebook_runtime" + 
transports.NotebookServiceRestInterceptor, "post_upgrade_notebook_runtime" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_assign_notebook_runtime" + transports.NotebookServiceRestInterceptor, "pre_upgrade_notebook_runtime" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.AssignNotebookRuntimeRequest.pb( - notebook_service.AssignNotebookRuntimeRequest() + pb_message = notebook_service.UpgradeNotebookRuntimeRequest.pb( + notebook_service.UpgradeNotebookRuntimeRequest() ) transcode.return_value = { "method": "post", @@ -8303,7 +11437,7 @@ def test_assign_notebook_runtime_rest_interceptors(null_interceptor): operations_pb2.Operation() ) - request = notebook_service.AssignNotebookRuntimeRequest() + request = notebook_service.UpgradeNotebookRuntimeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -8311,7 +11445,7 @@ def test_assign_notebook_runtime_rest_interceptors(null_interceptor): pre.return_value = request, metadata post.return_value = operations_pb2.Operation() - client.assign_notebook_runtime( + client.upgrade_notebook_runtime( request, metadata=[ ("key", "val"), @@ -8323,8 +11457,8 @@ def test_assign_notebook_runtime_rest_interceptors(null_interceptor): post.assert_called_once() -def test_assign_notebook_runtime_rest_bad_request( - transport: str = "rest", request_type=notebook_service.AssignNotebookRuntimeRequest +def test_upgrade_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.UpgradeNotebookRuntimeRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8332,7 +11466,9 @@ def test_assign_notebook_runtime_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } request = request_type(**request_init) # Mock the http 
request call within the method and fake a BadRequest error. @@ -8344,10 +11480,10 @@ def test_assign_notebook_runtime_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.assign_notebook_runtime(request) + client.upgrade_notebook_runtime(request) -def test_assign_notebook_runtime_rest_flattened(): +def test_upgrade_notebook_runtime_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8359,14 +11495,13 @@ def test_assign_notebook_runtime_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/locations/sample2"} + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - notebook_runtime_template="notebook_runtime_template_value", - notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), - notebook_runtime_id="notebook_runtime_id_value", + name="name_value", ) mock_args.update(sample_request) @@ -8377,20 +11512,20 @@ def test_assign_notebook_runtime_rest_flattened(): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.assign_notebook_runtime(**mock_args) + client.upgrade_notebook_runtime(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes:assign" + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade" % client.transport._host, args[1], ) -def test_assign_notebook_runtime_rest_flattened_error(transport: str = "rest"): +def test_upgrade_notebook_runtime_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -8399,16 +11534,13 @@ def test_assign_notebook_runtime_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.assign_notebook_runtime( - notebook_service.AssignNotebookRuntimeRequest(), - parent="parent_value", - notebook_runtime_template="notebook_runtime_template_value", - notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), - notebook_runtime_id="notebook_runtime_id_value", + client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", ) -def test_assign_notebook_runtime_rest_error(): +def test_upgrade_notebook_runtime_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -8417,11 +11549,11 @@ def test_assign_notebook_runtime_rest_error(): @pytest.mark.parametrize( "request_type", [ - notebook_service.GetNotebookRuntimeRequest, + notebook_service.StartNotebookRuntimeRequest, dict, ], ) -def test_get_notebook_runtime_rest(request_type): +def test_start_notebook_runtime_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8436,58 +11568,22 @@ def test_get_notebook_runtime_rest(request_type): # Mock the http request call within the method and fake a response. 
with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntime( - name="name_value", - runtime_user="runtime_user_value", - proxy_uri="proxy_uri_value", - health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, - display_name="display_name_value", - description="description_value", - service_account="service_account_value", - runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, - is_upgradable=True, - version="version_value", - notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, - network_tags=["network_tags_value"], - satisfies_pzs=True, - satisfies_pzi=True, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_notebook_runtime(request) + response = client.start_notebook_runtime(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, notebook_runtime.NotebookRuntime) - assert response.name == "name_value" - assert response.runtime_user == "runtime_user_value" - assert response.proxy_uri == "proxy_uri_value" - assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY - assert response.display_name == "display_name_value" - assert response.description == "description_value" - assert response.service_account == "service_account_value" - assert ( - response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING - ) - assert response.is_upgradable is True - assert response.version == "version_value" - assert ( - response.notebook_runtime_type - == notebook_runtime.NotebookRuntimeType.USER_DEFINED - ) - assert response.network_tags == ["network_tags_value"] - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True + assert response.operation.name == "operations/spam" -def test_get_notebook_runtime_rest_use_cached_wrapped_rpc(): +def test_start_notebook_runtime_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -8502,7 +11598,8 @@ def test_get_notebook_runtime_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_notebook_runtime in client._transport._wrapped_methods + client._transport.start_notebook_runtime + in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -8511,24 +11608,28 @@ def test_get_notebook_runtime_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. 
) client._transport._wrapped_methods[ - client._transport.get_notebook_runtime + client._transport.start_notebook_runtime ] = mock_rpc request = {} - client.get_notebook_runtime(request) + client.start_notebook_runtime(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_notebook_runtime(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.start_notebook_runtime(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_notebook_runtime_rest_required_fields( - request_type=notebook_service.GetNotebookRuntimeRequest, +def test_start_notebook_runtime_rest_required_fields( + request_type=notebook_service.StartNotebookRuntimeRequest, ): transport_class = transports.NotebookServiceRestTransport @@ -8544,7 +11645,7 @@ def test_get_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -8553,7 +11654,7 @@ def test_get_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -8567,7 +11668,7 @@ def test_get_notebook_runtime_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = notebook_runtime.NotebookRuntime() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -8579,39 +11680,37 @@ def test_get_notebook_runtime_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_notebook_runtime(request) + response = client.start_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_notebook_runtime_rest_unset_required_fields(): +def test_start_notebook_runtime_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_notebook_runtime._get_unset_required_fields({}) + unset_fields = transport.start_notebook_runtime._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_notebook_runtime_rest_interceptors(null_interceptor): +def test_start_notebook_runtime_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8624,14 +11723,16 @@ def 
test_get_notebook_runtime_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_start_notebook_runtime" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime" + transports.NotebookServiceRestInterceptor, "pre_start_notebook_runtime" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.GetNotebookRuntimeRequest.pb( - notebook_service.GetNotebookRuntimeRequest() + pb_message = notebook_service.StartNotebookRuntimeRequest.pb( + notebook_service.StartNotebookRuntimeRequest() ) transcode.return_value = { "method": "post", @@ -8643,19 +11744,19 @@ def test_get_notebook_runtime_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = notebook_runtime.NotebookRuntime.to_json( - notebook_runtime.NotebookRuntime() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() ) - request = notebook_service.GetNotebookRuntimeRequest() + request = notebook_service.StartNotebookRuntimeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = notebook_runtime.NotebookRuntime() + post.return_value = operations_pb2.Operation() - client.get_notebook_runtime( + client.start_notebook_runtime( request, metadata=[ ("key", "val"), @@ -8667,8 +11768,8 @@ def test_get_notebook_runtime_rest_interceptors(null_interceptor): post.assert_called_once() -def test_get_notebook_runtime_rest_bad_request( - transport: str = "rest", request_type=notebook_service.GetNotebookRuntimeRequest +def test_start_notebook_runtime_rest_bad_request( + 
transport: str = "rest", request_type=notebook_service.StartNotebookRuntimeRequest ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8690,10 +11791,10 @@ def test_get_notebook_runtime_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.get_notebook_runtime(request) + client.start_notebook_runtime(request) -def test_get_notebook_runtime_rest_flattened(): +def test_start_notebook_runtime_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8702,7 +11803,7 @@ def test_get_notebook_runtime_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_runtime.NotebookRuntime() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = { @@ -8718,26 +11819,24 @@ def test_get_notebook_runtime_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_runtime.NotebookRuntime.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_notebook_runtime(**mock_args) + client.start_notebook_runtime(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:start" % client.transport._host, args[1], ) -def test_get_notebook_runtime_rest_flattened_error(transport: str = "rest"): +def test_start_notebook_runtime_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -8746,59 +11845,155 @@ def test_get_notebook_runtime_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_notebook_runtime( - notebook_service.GetNotebookRuntimeRequest(), + client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), name="name_value", ) -def test_get_notebook_runtime_rest_error(): - client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_start_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookExecutionJobRequest, + dict, + ], +) +def test_create_notebook_execution_job_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["notebook_execution_job"] = { + "dataform_repository_source": { + "dataform_repository_resource_name": "dataform_repository_resource_name_value", + "commit_sha": "commit_sha_value", + }, + "gcs_notebook_source": {"uri": "uri_value", "generation": "generation_value"}, + "direct_notebook_source": {"content": b"content_blob"}, + 
"notebook_runtime_template_resource_name": "notebook_runtime_template_resource_name_value", + "gcs_output_uri": "gcs_output_uri_value", + "execution_user": "execution_user_value", + "service_account": "service_account_value", + "name": "name_value", + "display_name": "display_name_value", + "execution_timeout": {"seconds": 751, "nanos": 543}, + "schedule_resource_name": "schedule_resource_name_value", + "job_state": 1, + "status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "labels": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = notebook_service.CreateNotebookExecutionJobRequest.meta.fields[ + "notebook_execution_job" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "notebook_execution_job" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value -@pytest.mark.parametrize( - "request_type", - [ - notebook_service.ListNotebookRuntimesRequest, - dict, - ], -) -def test_list_notebook_runtimes_rest(request_type): - client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/locations/sample2"} + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present 
at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["notebook_execution_job"][field])): + del request_init["notebook_execution_job"][field][i][subfield] + else: + del request_init["notebook_execution_job"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = notebook_service.ListNotebookRuntimesResponse( - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_notebook_runtimes(request) + response = client.create_notebook_execution_job(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListNotebookRuntimesPager) - assert response.next_page_token == "next_page_token_value" + assert response.operation.name == "operations/spam" -def test_list_notebook_runtimes_rest_use_cached_wrapped_rpc(): +def test_create_notebook_execution_job_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -8813,7 +12008,7 @@ def test_list_notebook_runtimes_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_notebook_runtimes + client._transport.create_notebook_execution_job in client._transport._wrapped_methods ) @@ -8823,24 +12018,28 @@ def test_list_notebook_runtimes_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_notebook_runtimes + client._transport.create_notebook_execution_job ] = mock_rpc request = {} - client.list_notebook_runtimes(request) + client.create_notebook_execution_job(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_notebook_runtimes(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_notebook_execution_job(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_notebook_runtimes_rest_required_fields( - request_type=notebook_service.ListNotebookRuntimesRequest, +def test_create_notebook_execution_job_rest_required_fields( + request_type=notebook_service.CreateNotebookExecutionJobRequest, ): transport_class = transports.NotebookServiceRestTransport @@ -8856,7 +12055,7 @@ def test_list_notebook_runtimes_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + ).create_notebook_execution_job._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -8865,17 +12064,9 @@ def test_list_notebook_runtimes_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + ).create_notebook_execution_job._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - "read_mask", - ) - ) + assert not set(unset_fields) - set(("notebook_execution_job_id",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -8889,7 +12080,7 @@ def test_list_notebook_runtimes_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = notebook_service.ListNotebookRuntimesResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -8901,52 +12092,47 @@ def test_list_notebook_runtimes_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimesResponse.pb( - return_value - ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_notebook_runtimes(request) + response = client.create_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_notebook_runtimes_rest_unset_required_fields(): +def test_create_notebook_execution_job_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_notebook_runtimes._get_unset_required_fields({}) + unset_fields = transport.create_notebook_execution_job._get_unset_required_fields( + {} + ) assert set(unset_fields) == ( - set( + set(("notebookExecutionJobId",)) + & set( ( - "filter", - "orderBy", - "pageSize", - "pageToken", - "readMask", + "parent", + "notebookExecutionJob", ) ) - & set(("parent",)) ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def 
test_list_notebook_runtimes_rest_interceptors(null_interceptor): +def test_create_notebook_execution_job_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8959,14 +12145,16 @@ def test_list_notebook_runtimes_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_list_notebook_runtimes" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_create_notebook_execution_job" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtimes" + transports.NotebookServiceRestInterceptor, "pre_create_notebook_execution_job" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.ListNotebookRuntimesRequest.pb( - notebook_service.ListNotebookRuntimesRequest() + pb_message = notebook_service.CreateNotebookExecutionJobRequest.pb( + notebook_service.CreateNotebookExecutionJobRequest() ) transcode.return_value = { "method": "post", @@ -8978,21 +12166,19 @@ def test_list_notebook_runtimes_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = ( - notebook_service.ListNotebookRuntimesResponse.to_json( - notebook_service.ListNotebookRuntimesResponse() - ) + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() ) - request = notebook_service.ListNotebookRuntimesRequest() + request = notebook_service.CreateNotebookExecutionJobRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = notebook_service.ListNotebookRuntimesResponse() + post.return_value = operations_pb2.Operation() - 
client.list_notebook_runtimes( + client.create_notebook_execution_job( request, metadata=[ ("key", "val"), @@ -9004,8 +12190,9 @@ def test_list_notebook_runtimes_rest_interceptors(null_interceptor): post.assert_called_once() -def test_list_notebook_runtimes_rest_bad_request( - transport: str = "rest", request_type=notebook_service.ListNotebookRuntimesRequest +def test_create_notebook_execution_job_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.CreateNotebookExecutionJobRequest, ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9025,10 +12212,10 @@ def test_list_notebook_runtimes_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.list_notebook_runtimes(request) + client.create_notebook_execution_job(request) -def test_list_notebook_runtimes_rest_flattened(): +def test_create_notebook_execution_job_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9037,7 +12224,7 @@ def test_list_notebook_runtimes_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = notebook_service.ListNotebookRuntimesResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = {"parent": "projects/sample1/locations/sample2"} @@ -9045,32 +12232,36 @@ def test_list_notebook_runtimes_rest_flattened(): # get truthy value for each flattened field mock_args = dict( parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.list_notebook_runtimes(**mock_args) + client.create_notebook_execution_job(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes" + "%s/v1/{parent=projects/*/locations/*}/notebookExecutionJobs" % client.transport._host, args[1], ) -def test_list_notebook_runtimes_rest_flattened_error(transport: str = "rest"): +def test_create_notebook_execution_job_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9079,83 +12270,32 @@ def test_list_notebook_runtimes_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_notebook_runtimes( - notebook_service.ListNotebookRuntimesRequest(), + client.create_notebook_execution_job( + notebook_service.CreateNotebookExecutionJobRequest(), parent="parent_value", + notebook_execution_job=gca_notebook_execution_job.NotebookExecutionJob( + dataform_repository_source=gca_notebook_execution_job.NotebookExecutionJob.DataformRepositorySource( + dataform_repository_resource_name="dataform_repository_resource_name_value" + ) + ), + notebook_execution_job_id="notebook_execution_job_id_value", ) -def test_list_notebook_runtimes_rest_pager(transport: str = "rest"): +def test_create_notebook_execution_job_rest_error(): client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - notebook_service.ListNotebookRuntimesResponse( - notebook_runtimes=[ - notebook_runtime.NotebookRuntime(), - notebook_runtime.NotebookRuntime(), - notebook_runtime.NotebookRuntime(), - ], - next_page_token="abc", - ), - notebook_service.ListNotebookRuntimesResponse( - notebook_runtimes=[], - next_page_token="def", - ), - notebook_service.ListNotebookRuntimesResponse( - notebook_runtimes=[ - notebook_runtime.NotebookRuntime(), - ], - next_page_token="ghi", - ), - notebook_service.ListNotebookRuntimesResponse( - notebook_runtimes=[ - notebook_runtime.NotebookRuntime(), - notebook_runtime.NotebookRuntime(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - notebook_service.ListNotebookRuntimesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/locations/sample2"} - - pager = client.list_notebook_runtimes(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) - - pages = list(client.list_notebook_runtimes(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - @pytest.mark.parametrize( "request_type", [ - notebook_service.DeleteNotebookRuntimeRequest, + notebook_service.GetNotebookExecutionJobRequest, dict, ], ) -def test_delete_notebook_runtime_rest(request_type): +def test_get_notebook_execution_job_rest(request_type): client = NotebookServiceClient( 
credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9163,29 +12303,43 @@ def test_delete_notebook_runtime_rest(request_type): # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_execution_job.NotebookExecutionJob( + name="name_value", + display_name="display_name_value", + schedule_resource_name="schedule_resource_name_value", + job_state=job_state.JobState.JOB_STATE_QUEUED, + notebook_runtime_template_resource_name="notebook_runtime_template_resource_name_value", + gcs_output_uri="gcs_output_uri_value", + execution_user="execution_user_value", + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_execution_job.NotebookExecutionJob.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_notebook_runtime(request) + response = client.get_notebook_execution_job(request) # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" + assert isinstance(response, notebook_execution_job.NotebookExecutionJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.schedule_resource_name == "schedule_resource_name_value" + assert response.job_state == job_state.JobState.JOB_STATE_QUEUED -def test_delete_notebook_runtime_rest_use_cached_wrapped_rpc(): +def test_get_notebook_execution_job_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -9200,7 +12354,7 @@ def test_delete_notebook_runtime_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.delete_notebook_runtime + client._transport.get_notebook_execution_job in client._transport._wrapped_methods ) @@ -9210,28 +12364,24 @@ def test_delete_notebook_runtime_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.delete_notebook_runtime + client._transport.get_notebook_execution_job ] = mock_rpc request = {} - client.delete_notebook_runtime(request) + client.get_notebook_execution_job(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.delete_notebook_runtime(request) + client.get_notebook_execution_job(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_notebook_runtime_rest_required_fields( - request_type=notebook_service.DeleteNotebookRuntimeRequest, +def test_get_notebook_execution_job_rest_required_fields( + request_type=notebook_service.GetNotebookExecutionJobRequest, ): transport_class = transports.NotebookServiceRestTransport @@ -9247,7 +12397,7 @@ def test_delete_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + ).get_notebook_execution_job._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -9256,7 +12406,9 @@ def test_delete_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + ).get_notebook_execution_job._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -9270,7 +12422,7 @@ def test_delete_notebook_runtime_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_execution_job.NotebookExecutionJob() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -9282,36 +12434,39 @@ def test_delete_notebook_runtime_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_execution_job.NotebookExecutionJob.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_notebook_runtime(request) + response = client.get_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_notebook_runtime_rest_unset_required_fields(): +def test_get_notebook_execution_job_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_notebook_runtime._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.get_notebook_execution_job._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_notebook_runtime_rest_interceptors(null_interceptor): +def test_get_notebook_execution_job_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( 
credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9324,16 +12479,14 @@ def test_delete_notebook_runtime_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_delete_notebook_runtime" + transports.NotebookServiceRestInterceptor, "post_get_notebook_execution_job" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_delete_notebook_runtime" + transports.NotebookServiceRestInterceptor, "pre_get_notebook_execution_job" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.DeleteNotebookRuntimeRequest.pb( - notebook_service.DeleteNotebookRuntimeRequest() + pb_message = notebook_service.GetNotebookExecutionJobRequest.pb( + notebook_service.GetNotebookExecutionJobRequest() ) transcode.return_value = { "method": "post", @@ -9345,19 +12498,19 @@ def test_delete_notebook_runtime_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + req.return_value._content = notebook_execution_job.NotebookExecutionJob.to_json( + notebook_execution_job.NotebookExecutionJob() ) - request = notebook_service.DeleteNotebookRuntimeRequest() + request = notebook_service.GetNotebookExecutionJobRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = notebook_execution_job.NotebookExecutionJob() - client.delete_notebook_runtime( + client.get_notebook_execution_job( request, metadata=[ ("key", "val"), @@ -9369,8 +12522,9 @@ def test_delete_notebook_runtime_rest_interceptors(null_interceptor): post.assert_called_once() -def 
test_delete_notebook_runtime_rest_bad_request( - transport: str = "rest", request_type=notebook_service.DeleteNotebookRuntimeRequest +def test_get_notebook_execution_job_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.GetNotebookExecutionJobRequest, ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9379,7 +12533,7 @@ def test_delete_notebook_runtime_rest_bad_request( # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } request = request_type(**request_init) @@ -9392,10 +12546,10 @@ def test_delete_notebook_runtime_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.delete_notebook_runtime(request) + client.get_notebook_execution_job(request) -def test_delete_notebook_runtime_rest_flattened(): +def test_get_notebook_execution_job_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9404,11 +12558,11 @@ def test_delete_notebook_runtime_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_execution_job.NotebookExecutionJob() # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } # get truthy value for each flattened field @@ -9420,24 +12574,26 @@ def test_delete_notebook_runtime_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_execution_job.NotebookExecutionJob.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_notebook_runtime(**mock_args) + client.get_notebook_execution_job(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" + "%s/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}" % client.transport._host, args[1], ) -def test_delete_notebook_runtime_rest_flattened_error(transport: str = "rest"): +def test_get_notebook_execution_job_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9446,13 +12602,13 @@ def test_delete_notebook_runtime_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.delete_notebook_runtime( - notebook_service.DeleteNotebookRuntimeRequest(), + client.get_notebook_execution_job( + notebook_service.GetNotebookExecutionJobRequest(), name="name_value", ) -def test_delete_notebook_runtime_rest_error(): +def test_get_notebook_execution_job_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -9461,41 +12617,46 @@ def test_delete_notebook_runtime_rest_error(): @pytest.mark.parametrize( "request_type", [ - notebook_service.UpgradeNotebookRuntimeRequest, + notebook_service.ListNotebookExecutionJobsRequest, dict, ], ) -def test_upgrade_notebook_runtime_rest(request_type): +def test_list_notebook_execution_jobs_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookExecutionJobsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookExecutionJobsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.upgrade_notebook_runtime(request) + response = client.list_notebook_execution_jobs(request) # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + assert isinstance(response, pagers.ListNotebookExecutionJobsPager) + assert response.next_page_token == "next_page_token_value" -def test_upgrade_notebook_runtime_rest_use_cached_wrapped_rpc(): +def test_list_notebook_execution_jobs_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -9510,7 +12671,7 @@ def test_upgrade_notebook_runtime_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.upgrade_notebook_runtime + client._transport.list_notebook_execution_jobs in client._transport._wrapped_methods ) @@ -9520,33 +12681,29 @@ def test_upgrade_notebook_runtime_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.upgrade_notebook_runtime + client._transport.list_notebook_execution_jobs ] = mock_rpc request = {} - client.upgrade_notebook_runtime(request) + client.list_notebook_execution_jobs(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.upgrade_notebook_runtime(request) + client.list_notebook_execution_jobs(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_upgrade_notebook_runtime_rest_required_fields( - request_type=notebook_service.UpgradeNotebookRuntimeRequest, +def test_list_notebook_execution_jobs_rest_required_fields( + request_type=notebook_service.ListNotebookExecutionJobsRequest, ): transport_class = transports.NotebookServiceRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -9557,21 +12714,31 @@ def test_upgrade_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + ).list_notebook_execution_jobs._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + ).list_notebook_execution_jobs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "view", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9580,7 +12747,7 @@ def test_upgrade_notebook_runtime_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookExecutionJobsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -9592,37 +12759,52 @@ def test_upgrade_notebook_runtime_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookExecutionJobsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.upgrade_notebook_runtime(request) + response = client.list_notebook_execution_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_upgrade_notebook_runtime_rest_unset_required_fields(): +def 
test_list_notebook_execution_jobs_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.upgrade_notebook_runtime._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.list_notebook_execution_jobs._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): +def test_list_notebook_execution_jobs_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9635,16 +12817,14 @@ def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_upgrade_notebook_runtime" + transports.NotebookServiceRestInterceptor, "post_list_notebook_execution_jobs" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_upgrade_notebook_runtime" + transports.NotebookServiceRestInterceptor, "pre_list_notebook_execution_jobs" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.UpgradeNotebookRuntimeRequest.pb( - notebook_service.UpgradeNotebookRuntimeRequest() + pb_message = notebook_service.ListNotebookExecutionJobsRequest.pb( + notebook_service.ListNotebookExecutionJobsRequest() ) transcode.return_value = { "method": "post", @@ -9656,19 +12836,21 @@ def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 
req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + req.return_value._content = ( + notebook_service.ListNotebookExecutionJobsResponse.to_json( + notebook_service.ListNotebookExecutionJobsResponse() + ) ) - request = notebook_service.UpgradeNotebookRuntimeRequest() + request = notebook_service.ListNotebookExecutionJobsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = notebook_service.ListNotebookExecutionJobsResponse() - client.upgrade_notebook_runtime( + client.list_notebook_execution_jobs( request, metadata=[ ("key", "val"), @@ -9680,8 +12862,9 @@ def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): post.assert_called_once() -def test_upgrade_notebook_runtime_rest_bad_request( - transport: str = "rest", request_type=notebook_service.UpgradeNotebookRuntimeRequest +def test_list_notebook_execution_jobs_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.ListNotebookExecutionJobsRequest, ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9689,9 +12872,7 @@ def test_upgrade_notebook_runtime_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" - } + request_init = {"parent": "projects/sample1/locations/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -9703,10 +12884,10 @@ def test_upgrade_notebook_runtime_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.upgrade_notebook_runtime(request) + client.list_notebook_execution_jobs(request) -def test_upgrade_notebook_runtime_rest_flattened(): +def test_list_notebook_execution_jobs_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9715,40 +12896,42 @@ def test_upgrade_notebook_runtime_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = notebook_service.ListNotebookExecutionJobsResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" - } + sample_request = {"parent": "projects/sample1/locations/sample2"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookExecutionJobsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.upgrade_notebook_runtime(**mock_args) + client.list_notebook_execution_jobs(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade" + "%s/v1/{parent=projects/*/locations/*}/notebookExecutionJobs" % client.transport._host, args[1], ) -def test_upgrade_notebook_runtime_rest_flattened_error(transport: str = "rest"): +def test_list_notebook_execution_jobs_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9757,26 +12940,86 @@ def test_upgrade_notebook_runtime_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.upgrade_notebook_runtime( - notebook_service.UpgradeNotebookRuntimeRequest(), - name="name_value", + client.list_notebook_execution_jobs( + notebook_service.ListNotebookExecutionJobsRequest(), + parent="parent_value", ) -def test_upgrade_notebook_runtime_rest_error(): +def test_list_notebook_execution_jobs_rest_pager(transport: str = "rest"): client = NotebookServiceClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[], + next_page_token="def", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookExecutionJobsResponse( + notebook_execution_jobs=[ + notebook_execution_job.NotebookExecutionJob(), + notebook_execution_job.NotebookExecutionJob(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookExecutionJobsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_execution_jobs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_execution_job.NotebookExecutionJob) for i in results + ) + + pages = list(client.list_notebook_execution_jobs(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + @pytest.mark.parametrize( "request_type", [ - notebook_service.StartNotebookRuntimeRequest, + notebook_service.DeleteNotebookExecutionJobRequest, dict, ], ) -def 
test_start_notebook_runtime_rest(request_type): +def test_delete_notebook_execution_job_rest(request_type): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9784,7 +13027,7 @@ def test_start_notebook_runtime_rest(request_type): # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } request = request_type(**request_init) @@ -9800,13 +13043,13 @@ def test_start_notebook_runtime_rest(request_type): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.start_notebook_runtime(request) + response = client.delete_notebook_execution_job(request) # Establish that the response is the type that we expect. assert response.operation.name == "operations/spam" -def test_start_notebook_runtime_rest_use_cached_wrapped_rpc(): +def test_delete_notebook_execution_job_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -9821,7 +13064,7 @@ def test_start_notebook_runtime_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.start_notebook_runtime + client._transport.delete_notebook_execution_job in client._transport._wrapped_methods ) @@ -9831,11 +13074,11 @@ def test_start_notebook_runtime_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.start_notebook_runtime + client._transport.delete_notebook_execution_job ] = mock_rpc request = {} - client.start_notebook_runtime(request) + client.delete_notebook_execution_job(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -9844,15 +13087,15 @@ def test_start_notebook_runtime_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.start_notebook_runtime(request) + client.delete_notebook_execution_job(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_start_notebook_runtime_rest_required_fields( - request_type=notebook_service.StartNotebookRuntimeRequest, +def test_delete_notebook_execution_job_rest_required_fields( + request_type=notebook_service.DeleteNotebookExecutionJobRequest, ): transport_class = transports.NotebookServiceRestTransport @@ -9868,7 +13111,7 @@ def test_start_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + ).delete_notebook_execution_job._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -9877,7 +13120,7 @@ def test_start_notebook_runtime_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + ).delete_notebook_execution_job._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -9903,10 +13146,9 @@ def test_start_notebook_runtime_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() @@ -9916,24 +13158,26 @@ def test_start_notebook_runtime_rest_required_fields( response_value._content = 
json_return_value.encode("UTF-8") req.return_value = response_value - response = client.start_notebook_runtime(request) + response = client.delete_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_start_notebook_runtime_rest_unset_required_fields(): +def test_delete_notebook_execution_job_rest_unset_required_fields(): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.start_notebook_runtime._get_unset_required_fields({}) + unset_fields = transport.delete_notebook_execution_job._get_unset_required_fields( + {} + ) assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_start_notebook_runtime_rest_interceptors(null_interceptor): +def test_delete_notebook_execution_job_rest_interceptors(null_interceptor): transport = transports.NotebookServiceRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9948,14 +13192,14 @@ def test_start_notebook_runtime_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.NotebookServiceRestInterceptor, "post_start_notebook_runtime" + transports.NotebookServiceRestInterceptor, "post_delete_notebook_execution_job" ) as post, mock.patch.object( - transports.NotebookServiceRestInterceptor, "pre_start_notebook_runtime" + transports.NotebookServiceRestInterceptor, "pre_delete_notebook_execution_job" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = notebook_service.StartNotebookRuntimeRequest.pb( - notebook_service.StartNotebookRuntimeRequest() + pb_message = notebook_service.DeleteNotebookExecutionJobRequest.pb( + notebook_service.DeleteNotebookExecutionJobRequest() ) 
transcode.return_value = { "method": "post", @@ -9971,7 +13215,7 @@ def test_start_notebook_runtime_rest_interceptors(null_interceptor): operations_pb2.Operation() ) - request = notebook_service.StartNotebookRuntimeRequest() + request = notebook_service.DeleteNotebookExecutionJobRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -9979,7 +13223,7 @@ def test_start_notebook_runtime_rest_interceptors(null_interceptor): pre.return_value = request, metadata post.return_value = operations_pb2.Operation() - client.start_notebook_runtime( + client.delete_notebook_execution_job( request, metadata=[ ("key", "val"), @@ -9991,8 +13235,9 @@ def test_start_notebook_runtime_rest_interceptors(null_interceptor): post.assert_called_once() -def test_start_notebook_runtime_rest_bad_request( - transport: str = "rest", request_type=notebook_service.StartNotebookRuntimeRequest +def test_delete_notebook_execution_job_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.DeleteNotebookExecutionJobRequest, ): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -10001,7 +13246,7 @@ def test_start_notebook_runtime_rest_bad_request( # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } request = request_type(**request_init) @@ -10014,10 +13259,10 @@ def test_start_notebook_runtime_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.start_notebook_runtime(request) + client.delete_notebook_execution_job(request) -def test_start_notebook_runtime_rest_flattened(): +def test_delete_notebook_execution_job_rest_flattened(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -10030,7 +13275,7 @@ def test_start_notebook_runtime_rest_flattened(): 
# get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + "name": "projects/sample1/locations/sample2/notebookExecutionJobs/sample3" } # get truthy value for each flattened field @@ -10046,20 +13291,20 @@ def test_start_notebook_runtime_rest_flattened(): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.start_notebook_runtime(**mock_args) + client.delete_notebook_execution_job(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:start" + "%s/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}" % client.transport._host, args[1], ) -def test_start_notebook_runtime_rest_flattened_error(transport: str = "rest"): +def test_delete_notebook_execution_job_rest_flattened_error(transport: str = "rest"): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -10068,13 +13313,13 @@ def test_start_notebook_runtime_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.start_notebook_runtime( - notebook_service.StartNotebookRuntimeRequest(), + client.delete_notebook_execution_job( + notebook_service.DeleteNotebookExecutionJobRequest(), name="name_value", ) -def test_start_notebook_runtime_rest_error(): +def test_delete_notebook_execution_job_rest_error(): client = NotebookServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -10230,6 +13475,10 @@ def test_notebook_service_base_transport(): "delete_notebook_runtime", "upgrade_notebook_runtime", "start_notebook_runtime", + "create_notebook_execution_job", + "get_notebook_execution_job", + "list_notebook_execution_jobs", + "delete_notebook_execution_job", "set_iam_policy", "get_iam_policy", "test_iam_permissions", @@ -10553,6 +13802,18 @@ def test_notebook_service_client_transport_session_collision(transport_name): session1 = client1.transport.start_notebook_runtime._session session2 = client2.transport.start_notebook_runtime._session assert session1 != session2 + session1 = client1.transport.create_notebook_execution_job._session + session2 = client2.transport.create_notebook_execution_job._session + assert session1 != session2 + session1 = client1.transport.get_notebook_execution_job._session + session2 = client2.transport.get_notebook_execution_job._session + assert session1 != session2 + session1 = client1.transport.list_notebook_execution_jobs._session + session2 = client2.transport.list_notebook_execution_jobs._session + assert session1 != session2 + session1 = client1.transport.delete_notebook_execution_job._session + session2 = client2.transport.delete_notebook_execution_job._session + assert session1 != session2 def test_notebook_service_grpc_transport_channel(): @@ -10738,10 +13999,38 @@ def test_parse_network_path(): assert expected == actual -def test_notebook_runtime_path(): +def test_notebook_execution_job_path(): project = "oyster" location = "nudibranch" - notebook_runtime = "cuttlefish" + 
notebook_execution_job = "cuttlefish" + expected = "projects/{project}/locations/{location}/notebookExecutionJobs/{notebook_execution_job}".format( + project=project, + location=location, + notebook_execution_job=notebook_execution_job, + ) + actual = NotebookServiceClient.notebook_execution_job_path( + project, location, notebook_execution_job + ) + assert expected == actual + + +def test_parse_notebook_execution_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "notebook_execution_job": "nautilus", + } + path = NotebookServiceClient.notebook_execution_job_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_notebook_execution_job_path(path) + assert expected == actual + + +def test_notebook_runtime_path(): + project = "scallop" + location = "abalone" + notebook_runtime = "squid" expected = "projects/{project}/locations/{location}/notebookRuntimes/{notebook_runtime}".format( project=project, location=location, @@ -10755,9 +14044,9 @@ def test_notebook_runtime_path(): def test_parse_notebook_runtime_path(): expected = { - "project": "mussel", - "location": "winkle", - "notebook_runtime": "nautilus", + "project": "clam", + "location": "whelk", + "notebook_runtime": "octopus", } path = NotebookServiceClient.notebook_runtime_path(**expected) @@ -10767,9 +14056,9 @@ def test_parse_notebook_runtime_path(): def test_notebook_runtime_template_path(): - project = "scallop" - location = "abalone" - notebook_runtime_template = "squid" + project = "oyster" + location = "nudibranch" + notebook_runtime_template = "cuttlefish" expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( project=project, location=location, @@ -10783,9 +14072,9 @@ def test_notebook_runtime_template_path(): def test_parse_notebook_runtime_template_path(): expected = { - "project": "clam", - "location": "whelk", - "notebook_runtime_template": "octopus", + "project": 
"mussel", + "location": "winkle", + "notebook_runtime_template": "nautilus", } path = NotebookServiceClient.notebook_runtime_template_path(**expected) @@ -10794,6 +14083,32 @@ def test_parse_notebook_runtime_template_path(): assert expected == actual +def test_schedule_path(): + project = "scallop" + location = "abalone" + schedule = "squid" + expected = "projects/{project}/locations/{location}/schedules/{schedule}".format( + project=project, + location=location, + schedule=schedule, + ) + actual = NotebookServiceClient.schedule_path(project, location, schedule) + assert expected == actual + + +def test_parse_schedule_path(): + expected = { + "project": "clam", + "location": "whelk", + "schedule": "octopus", + } + path = NotebookServiceClient.schedule_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_schedule_path(path) + assert expected == actual + + def test_subnetwork_path(): project = "oyster" region = "nudibranch" diff --git a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py index bcd20c37b2..c1820d952a 100644 --- a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py @@ -1407,12 +1407,7 @@ async def test_create_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_persistent_resource ] = mock_object @@ -1841,12 +1836,7 @@ async def test_get_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - 
mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_persistent_resource ] = mock_object @@ -2244,12 +2234,7 @@ async def test_list_persistent_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_persistent_resources ] = mock_object @@ -2844,12 +2829,7 @@ async def test_delete_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_persistent_resource ] = mock_object @@ -3233,12 +3213,7 @@ async def test_update_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_persistent_resource ] = mock_object @@ -3644,12 +3619,7 @@ async def test_reboot_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.reboot_persistent_resource ] = mock_object @@ -3927,6 +3897,7 @@ def test_create_persistent_resource_rest(request_type): "resource_pool_images": {}, 
"head_node_resource_pool_id": "head_node_resource_pool_id_value", "ray_metric_spec": {"disabled": True}, + "ray_logs_spec": {"disabled": True}, }, }, "resource_runtime": {"access_uris": {}}, @@ -4155,6 +4126,7 @@ def test_create_persistent_resource_rest_required_fields( "persistentResourceId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4493,7 +4465,7 @@ def test_get_persistent_resource_rest_required_fields( response = client.get_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4821,7 +4793,7 @@ def test_list_persistent_resources_rest_required_fields( response = client.list_persistent_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5204,7 +5176,7 @@ def test_delete_persistent_resource_rest_required_fields( response = client.delete_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5439,6 +5411,7 @@ def test_update_persistent_resource_rest(request_type): "resource_pool_images": {}, "head_node_resource_pool_id": "head_node_resource_pool_id_value", "ray_metric_spec": {"disabled": True}, + "ray_logs_spec": {"disabled": True}, }, }, "resource_runtime": {"access_uris": {}}, @@ -5647,7 +5620,7 @@ def test_update_persistent_resource_rest_required_fields( response = client.update_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5979,7 +5952,7 @@ def 
test_reboot_persistent_resource_rest_required_fields( response = client.reboot_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 2946091ef1..b4f387f2af 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -1371,12 +1371,7 @@ async def test_create_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_training_pipeline ] = mock_object @@ -1799,12 +1794,7 @@ async def test_get_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_training_pipeline ] = mock_object @@ -2206,12 +2196,7 @@ async def test_list_training_pipelines_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_training_pipelines ] = mock_object @@ -2796,12 +2781,7 @@ async def test_delete_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - 
def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_training_pipeline ] = mock_object @@ -3183,12 +3163,7 @@ async def test_cancel_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_training_pipeline ] = mock_object @@ -3592,12 +3567,7 @@ async def test_create_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_pipeline_job ] = mock_object @@ -4034,12 +4004,7 @@ async def test_get_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_pipeline_job ] = mock_object @@ -4437,12 +4402,7 @@ async def test_list_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_pipeline_jobs ] = mock_object @@ -5026,12 +4986,7 @@ async def test_delete_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached 
wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_pipeline_job ] = mock_object @@ -5419,12 +5374,7 @@ async def test_batch_delete_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_delete_pipeline_jobs ] = mock_object @@ -5815,12 +5765,7 @@ async def test_cancel_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_pipeline_job ] = mock_object @@ -6198,12 +6143,7 @@ async def test_batch_cancel_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_cancel_pipeline_jobs ] = mock_object @@ -6836,7 +6776,7 @@ def test_create_training_pipeline_rest_required_fields( response = client.create_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7166,7 +7106,7 @@ def test_get_training_pipeline_rest_required_fields( response = client.get_training_pipeline(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7489,7 +7429,7 @@ def test_list_training_pipelines_rest_required_fields( response = client.list_training_pipelines(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7864,7 +7804,7 @@ def test_delete_training_pipeline_rest_required_fields( response = client.delete_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8171,7 +8111,7 @@ def test_cancel_training_pipeline_rest_required_fields( response = client.cancel_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8661,7 +8601,7 @@ def test_create_pipeline_job_rest_required_fields( response = client.create_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8994,7 +8934,7 @@ def test_get_pipeline_job_rest_required_fields( response = client.get_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9313,7 +9253,7 @@ def test_list_pipeline_jobs_rest_required_fields( response = client.list_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9684,7 +9624,7 @@ def test_delete_pipeline_job_rest_required_fields( response = 
client.delete_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9995,7 +9935,7 @@ def test_batch_delete_pipeline_jobs_rest_required_fields( response = client.batch_delete_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10306,7 +10246,7 @@ def test_cancel_pipeline_job_rest_required_fields( response = client.cancel_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10607,7 +10547,7 @@ def test_batch_cancel_pipeline_jobs_rest_required_fields( response = client.batch_cancel_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py index 3a9f4e2b03..b11c9d47ef 100644 --- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -1349,12 +1349,7 @@ async def test_predict_async_use_cached_wrapped_rpc(transport: str = "grpc_async ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.predict ] = mock_object @@ -1678,12 +1673,7 @@ async def test_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - 
self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.raw_predict ] = mock_object @@ -2066,12 +2056,7 @@ async def test_stream_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_raw_predict ] = mock_object @@ -2445,12 +2430,7 @@ async def test_direct_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.direct_predict ] = mock_object @@ -2744,12 +2724,7 @@ async def test_direct_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.direct_raw_predict ] = mock_object @@ -2973,12 +2948,7 @@ async def test_stream_direct_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_direct_predict ] = mock_object @@ -3136,12 +3106,7 @@ async def test_stream_direct_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_direct_raw_predict ] = mock_object @@ -3296,12 +3261,7 @@ async def test_streaming_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_predict ] = mock_object @@ -3534,12 +3494,7 @@ async def test_server_streaming_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.server_streaming_predict ] = mock_object @@ -3763,12 +3718,7 @@ async def test_streaming_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_raw_predict ] = mock_object @@ -3991,12 +3941,7 @@ async def test_explain_async_use_cached_wrapped_rpc(transport: str = "grpc_async ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.explain ] = mock_object @@ -4310,12 +4255,7 @@ async def 
test_generate_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.generate_content ] = mock_object @@ -4697,12 +4637,7 @@ async def test_stream_generate_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_generate_content ] = mock_object @@ -5071,7 +5006,7 @@ def test_predict_rest_required_fields(request_type=prediction_service.PredictReq response = client.predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5387,7 +5322,7 @@ def test_raw_predict_rest_required_fields( response = client.raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5703,7 +5638,7 @@ def test_stream_raw_predict_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.stream_raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6008,7 +5943,7 @@ def test_direct_predict_rest_required_fields( response = client.direct_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6259,7 +6194,7 
@@ def test_direct_raw_predict_rest_required_fields( response = client.direct_raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6551,7 +6486,7 @@ def test_server_streaming_predict_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.server_streaming_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6808,7 +6743,7 @@ def test_explain_rest_required_fields(request_type=prediction_service.ExplainReq response = client.explain(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7127,7 +7062,7 @@ def test_generate_content_rest_required_fields( response = client.generate_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7455,7 +7390,7 @@ def test_stream_generate_content_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.stream_generate_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py index 2465df6e55..63e7e759aa 100644 --- a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py @@ -1358,12 +1358,7 @@ async def test_create_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_schedule ] = mock_object @@ -1750,12 +1745,7 @@ async def test_delete_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_schedule ] = mock_object @@ -2142,12 +2132,7 @@ async def test_get_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_schedule ] = mock_object @@ -2527,12 +2512,7 @@ async def test_list_schedules_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_schedules ] = mock_object @@ -3079,12 +3059,7 @@ async def test_pause_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.pause_schedule ] = mock_object @@ -3432,12 +3407,7 @@ async def test_resume_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - 
class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.resume_schedule ] = mock_object @@ -3820,12 +3790,7 @@ async def test_update_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_schedule ] = mock_object @@ -4412,7 +4377,7 @@ def test_create_schedule_rest_required_fields( response = client.create_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4720,7 +4685,7 @@ def test_delete_schedule_rest_required_fields( response = client.delete_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5039,7 +5004,7 @@ def test_get_schedule_rest_required_fields( response = client.get_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5350,7 +5315,7 @@ def test_list_schedules_rest_required_fields( response = client.list_schedules(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5712,7 +5677,7 @@ def test_pause_schedule_rest_required_fields( response = client.pause_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6000,7 +5965,7 @@ def test_resume_schedule_rest_required_fields( response = client.resume_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6514,7 +6479,7 @@ def test_update_schedule_rest_required_fields( response = client.update_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py index 311449ea7f..8e06d860c0 100644 --- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -1383,12 +1383,7 @@ async def test_create_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_specialist_pool ] = mock_object @@ -1801,12 +1796,7 @@ async def test_get_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_specialist_pool ] = mock_object @@ -2206,12 +2196,7 @@ async def test_list_specialist_pools_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_specialist_pools ] = mock_object @@ -2796,12 +2781,7 @@ async def test_delete_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_specialist_pool ] = mock_object @@ -3185,12 +3165,7 @@ async def test_update_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_specialist_pool ] = mock_object @@ -3645,7 +3620,7 @@ def test_create_specialist_pool_rest_required_fields( response = client.create_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3975,7 +3950,7 @@ def test_get_specialist_pool_rest_required_fields( response = client.get_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4300,7 +4275,7 @@ def test_list_specialist_pools_rest_required_fields( response = client.list_specialist_pools(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4680,7 +4655,7 @@ def test_delete_specialist_pool_rest_required_fields( response = 
client.delete_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5077,7 +5052,7 @@ def test_update_specialist_pool_rest_required_fields( response = client.update_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py index 8aec3a6820..cded3dfb68 100644 --- a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py @@ -1370,12 +1370,7 @@ async def test_create_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard ] = mock_object @@ -1785,12 +1780,7 @@ async def test_get_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard ] = mock_object @@ -2178,12 +2168,7 @@ async def test_update_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.update_tensorboard ] = mock_object @@ -2585,12 +2570,7 @@ async def test_list_tensorboards_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboards ] = mock_object @@ -3174,12 +3154,7 @@ async def test_delete_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard ] = mock_object @@ -3563,12 +3538,7 @@ async def test_read_tensorboard_usage_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_usage ] = mock_object @@ -3953,12 +3923,7 @@ async def test_read_tensorboard_size_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_size ] = mock_object @@ -4360,12 +4325,7 @@ async def test_create_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + 
mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_experiment ] = mock_object @@ -4801,12 +4761,7 @@ async def test_get_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_experiment ] = mock_object @@ -5210,12 +5165,7 @@ async def test_update_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_experiment ] = mock_object @@ -5635,12 +5585,7 @@ async def test_list_tensorboard_experiments_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_experiments ] = mock_object @@ -6230,12 +6175,7 @@ async def test_delete_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_experiment ] = mock_object @@ -6635,12 +6575,7 @@ async def test_create_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock 
- class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_run ] = mock_object @@ -7049,12 +6984,7 @@ async def test_batch_create_tensorboard_runs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_tensorboard_runs ] = mock_object @@ -7469,12 +7399,7 @@ async def test_get_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_run ] = mock_object @@ -7873,12 +7798,7 @@ async def test_update_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_run ] = mock_object @@ -8288,12 +8208,7 @@ async def test_list_tensorboard_runs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_runs ] = mock_object @@ -8878,12 +8793,7 @@ async def 
test_delete_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_run ] = mock_object @@ -9271,12 +9181,7 @@ async def test_batch_create_tensorboard_time_series_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_tensorboard_time_series ] = mock_object @@ -9726,12 +9631,7 @@ async def test_create_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_time_series ] = mock_object @@ -10173,12 +10073,7 @@ async def test_get_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_time_series ] = mock_object @@ -10598,12 +10493,7 @@ async def test_update_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_time_series ] = mock_object @@ -11030,12 +10920,7 @@ async def test_list_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_time_series ] = mock_object @@ -11626,12 +11511,7 @@ async def test_delete_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_time_series ] = mock_object @@ -12023,12 +11903,7 @@ async def test_batch_read_tensorboard_time_series_data_async_use_cached_wrapped_ ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_read_tensorboard_time_series_data ] = mock_object @@ -12420,12 +12295,7 @@ async def test_read_tensorboard_time_series_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_time_series_data ] = mock_object @@ -12811,12 +12681,7 @@ async def test_read_tensorboard_blob_data_async_use_cached_wrapped_rpc( ) # Replace 
cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_blob_data ] = mock_object @@ -13205,12 +13070,7 @@ async def test_write_tensorboard_experiment_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.write_tensorboard_experiment_data ] = mock_object @@ -13626,12 +13486,7 @@ async def test_write_tensorboard_run_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.write_tensorboard_run_data ] = mock_object @@ -14056,12 +13911,7 @@ async def test_export_tensorboard_time_series_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_tensorboard_time_series_data ] = mock_object @@ -14708,7 +14558,7 @@ def test_create_tensorboard_rest_required_fields( response = client.create_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15037,7 +14887,7 @@ def test_get_tensorboard_rest_required_fields( response = 
client.get_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15426,7 +15276,7 @@ def test_update_tensorboard_rest_required_fields( response = client.update_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15759,7 +15609,7 @@ def test_list_tensorboards_rest_required_fields( response = client.list_tensorboards(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16132,7 +15982,7 @@ def test_delete_tensorboard_rest_required_fields( response = client.delete_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16443,7 +16293,7 @@ def test_read_tensorboard_usage_rest_required_fields( response = client.read_tensorboard_usage(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16762,7 +16612,7 @@ def test_read_tensorboard_size_rest_required_fields( response = client.read_tensorboard_size(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17188,6 +17038,7 @@ def test_create_tensorboard_experiment_rest_required_fields( "tensorboardExperimentId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17531,7 +17382,7 @@ def test_get_tensorboard_experiment_rest_required_fields( response = 
client.get_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17939,7 +17790,7 @@ def test_update_tensorboard_experiment_rest_required_fields( response = client.update_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18290,7 +18141,7 @@ def test_list_tensorboard_experiments_rest_required_fields( response = client.list_tensorboard_experiments(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18677,7 +18528,7 @@ def test_delete_tensorboard_experiment_rest_required_fields( response = client.delete_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19095,6 +18946,7 @@ def test_create_tensorboard_run_rest_required_fields( "tensorboardRunId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19425,7 +19277,7 @@ def test_batch_create_tensorboard_runs_rest_required_fields( response = client.batch_create_tensorboard_runs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19767,7 +19619,7 @@ def test_get_tensorboard_run_rest_required_fields( response = client.get_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20165,7 +20017,7 @@ def 
test_update_tensorboard_run_rest_required_fields( response = client.update_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20506,7 +20358,7 @@ def test_list_tensorboard_runs_rest_required_fields( response = client.list_tensorboard_runs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20888,7 +20740,7 @@ def test_delete_tensorboard_run_rest_required_fields( response = client.delete_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21209,7 +21061,7 @@ def test_batch_create_tensorboard_time_series_rest_required_fields( response = client.batch_create_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21663,7 +21515,7 @@ def test_create_tensorboard_time_series_rest_required_fields( response = client.create_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22017,7 +21869,7 @@ def test_get_tensorboard_time_series_rest_required_fields( response = client.get_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22440,7 +22292,7 @@ def test_update_tensorboard_time_series_rest_required_fields( response = client.update_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22795,7 +22647,7 @@ def test_list_tensorboard_time_series_rest_required_fields( response = client.list_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23185,7 +23037,7 @@ def test_delete_tensorboard_time_series_rest_required_fields( response = client.delete_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23529,6 +23381,7 @@ def test_batch_read_tensorboard_time_series_data_rest_required_fields( "timeSeries", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23876,7 +23729,7 @@ def test_read_tensorboard_time_series_data_rest_required_fields( response = client.read_tensorboard_time_series_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24222,7 +24075,7 @@ def test_read_tensorboard_blob_data_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.read_tensorboard_blob_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24551,7 +24404,7 @@ def test_write_tensorboard_experiment_data_rest_required_fields( response = client.write_tensorboard_experiment_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24896,7 +24749,7 @@ def 
test_write_tensorboard_run_data_rest_required_fields( response = client.write_tensorboard_run_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25240,7 +25093,7 @@ def test_export_tensorboard_time_series_data_rest_required_fields( response = client.export_tensorboard_time_series_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py index 5fb4cb008d..aea6ad63d5 100644 --- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py @@ -1303,12 +1303,7 @@ async def test_create_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_study ] = mock_object @@ -1691,12 +1686,7 @@ async def test_get_study_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_study ] = mock_object @@ -2064,12 +2054,7 @@ async def test_list_studies_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.list_studies ] = mock_object @@ -2616,12 +2601,7 @@ async def test_delete_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_study ] = mock_object @@ -2987,12 +2967,7 @@ async def test_lookup_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.lookup_study ] = mock_object @@ -3359,12 +3334,7 @@ async def test_suggest_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.suggest_trials ] = mock_object @@ -3662,12 +3632,7 @@ async def test_create_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_trial ] = mock_object @@ -4060,12 +4025,7 @@ async def test_get_trial_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_trial ] = mock_object @@ -4437,12 +4397,7 @@ async def test_list_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_trials ] = mock_object @@ -5024,12 +4979,7 @@ async def test_add_trial_measurement_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_trial_measurement ] = mock_object @@ -5343,12 +5293,7 @@ async def test_complete_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.complete_trial ] = mock_object @@ -5631,12 +5576,7 @@ async def test_delete_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_trial ] = mock_object @@ -6003,12 +5943,7 @@ async def test_check_trial_early_stopping_state_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.check_trial_early_stopping_state ] = mock_object @@ -6311,12 +6246,7 @@ async def test_stop_trial_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stop_trial ] = mock_object @@ -6613,12 +6543,7 @@ async def test_list_optimal_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_optimal_trials ] = mock_object @@ -7123,7 +7048,7 @@ def test_create_study_rest_required_fields( response = client.create_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7437,7 +7362,7 @@ def test_get_study_rest_required_fields(request_type=vizier_service.GetStudyRequ response = client.get_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7742,7 +7667,7 @@ def test_list_studies_rest_required_fields( response = client.list_studies(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8101,7 +8026,7 @@ def test_delete_study_rest_required_fields( response = client.delete_study(request) - expected_params = [] + expected_params = 
[("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8404,7 +8329,7 @@ def test_lookup_study_rest_required_fields( response = client.lookup_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8718,7 +8643,7 @@ def test_suggest_trials_rest_required_fields( response = client.suggest_trials(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9083,7 +9008,7 @@ def test_create_trial_rest_required_fields( response = client.create_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9406,7 +9331,7 @@ def test_get_trial_rest_required_fields(request_type=vizier_service.GetTrialRequ response = client.get_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9716,7 +9641,7 @@ def test_list_trials_rest_required_fields( response = client.list_trials(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10104,7 +10029,7 @@ def test_add_trial_measurement_rest_required_fields( response = client.add_trial_measurement(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10371,7 +10296,7 @@ def test_complete_trial_rest_required_fields( response = client.complete_trial(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10611,7 +10536,7 @@ def test_delete_trial_rest_required_fields( response = client.delete_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10912,7 +10837,7 @@ def test_check_trial_early_stopping_state_rest_required_fields( response = client.check_trial_early_stopping_state(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11176,7 +11101,7 @@ def test_stop_trial_rest_required_fields(request_type=vizier_service.StopTrialRe response = client.stop_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11424,7 +11349,7 @@ def test_list_optimal_trials_rest_required_fields( response = client.list_optimal_trials(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index 323f27feb3..bd67758ad4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -1310,12 +1310,7 @@ async def test_create_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_dataset ] = 
mock_object @@ -1555,6 +1550,8 @@ def test_get_dataset(request_type, transport: str = "grpc"): etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_dataset(request) @@ -1574,6 +1571,8 @@ def test_get_dataset(request_type, transport: str = "grpc"): assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_dataset_empty_call(): @@ -1680,6 +1679,8 @@ async def test_get_dataset_empty_call_async(): etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_dataset() @@ -1711,12 +1712,7 @@ async def test_get_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_dataset ] = mock_object @@ -1760,6 +1756,8 @@ async def test_get_dataset_async( etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_dataset(request) @@ -1780,6 +1778,8 @@ async def test_get_dataset_async( assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -1955,6 +1955,8 @@ def test_update_dataset(request_type, transport: str = "grpc"): etag="etag_value", 
metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_dataset(request) @@ -1974,6 +1976,8 @@ def test_update_dataset(request_type, transport: str = "grpc"): assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_dataset_empty_call(): @@ -2076,6 +2080,8 @@ async def test_update_dataset_empty_call_async(): etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_dataset() @@ -2107,12 +2113,7 @@ async def test_update_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_dataset ] = mock_object @@ -2156,6 +2157,8 @@ async def test_update_dataset_async( etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_dataset(request) @@ -2176,6 +2179,8 @@ async def test_update_dataset_async( assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2502,12 +2507,7 @@ async def test_list_datasets_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_datasets ] = mock_object @@ -3060,12 +3060,7 @@ async def test_delete_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_dataset ] = mock_object @@ -3429,12 +3424,7 @@ async def test_import_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_data ] = mock_object @@ -3820,12 +3810,7 @@ async def test_export_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_data ] = mock_object @@ -4236,12 +4221,7 @@ async def test_create_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_dataset_version ] = mock_object @@ -4491,6 +4471,8 @@ def test_update_dataset_version(request_type, transport: str = "grpc"): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", 
model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_dataset_version(request) @@ -4507,6 +4489,8 @@ def test_update_dataset_version(request_type, transport: str = "grpc"): assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_dataset_version_empty_call(): @@ -4617,6 +4601,8 @@ async def test_update_dataset_version_empty_call_async(): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_dataset_version() @@ -4648,12 +4634,7 @@ async def test_update_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_dataset_version ] = mock_object @@ -4697,6 +4678,8 @@ async def test_update_dataset_version_async( big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_dataset_version(request) @@ -4714,6 +4697,8 @@ async def test_update_dataset_version_async( assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -5058,12 +5043,7 @@ async def 
test_delete_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_dataset_version ] = mock_object @@ -5303,6 +5283,8 @@ def test_get_dataset_version(request_type, transport: str = "grpc"): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_dataset_version(request) @@ -5319,6 +5301,8 @@ def test_get_dataset_version(request_type, transport: str = "grpc"): assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_dataset_version_empty_call(): @@ -5432,6 +5416,8 @@ async def test_get_dataset_version_empty_call_async(): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_dataset_version() @@ -5463,12 +5449,7 @@ async def test_get_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_dataset_version ] = mock_object @@ -5512,6 +5493,8 @@ async def test_get_dataset_version_async( big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", 
model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_dataset_version(request) @@ -5529,6 +5512,8 @@ async def test_get_dataset_version_async( assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -5870,12 +5855,7 @@ async def test_list_dataset_versions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_dataset_versions ] = mock_object @@ -6460,12 +6440,7 @@ async def test_restore_dataset_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.restore_dataset_version ] = mock_object @@ -6847,12 +6822,7 @@ async def test_list_data_items_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_data_items ] = mock_object @@ -7430,12 +7400,7 @@ async def test_search_data_items_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() 
+ mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_data_items ] = mock_object @@ -7939,12 +7904,7 @@ async def test_list_saved_queries_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_saved_queries ] = mock_object @@ -8528,12 +8488,7 @@ async def test_delete_saved_query_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_saved_query ] = mock_object @@ -8927,12 +8882,7 @@ async def test_get_annotation_spec_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_annotation_spec ] = mock_object @@ -9319,12 +9269,7 @@ async def test_list_annotations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_annotations ] = mock_object @@ -9764,6 +9709,8 @@ def test_create_dataset_rest(request_type): "encryption_spec": {"kms_key_name": "kms_key_name_value"}, "metadata_artifact": "metadata_artifact_value", "model_reference": "model_reference_value", + 
"satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -9959,7 +9906,7 @@ def test_create_dataset_rest_required_fields( response = client.create_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10156,6 +10103,8 @@ def test_get_dataset_rest(request_type): etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -10179,6 +10128,8 @@ def test_get_dataset_rest(request_type): assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_dataset_rest_use_cached_wrapped_rpc(): @@ -10288,7 +10239,7 @@ def test_get_dataset_rest_required_fields( response = client.get_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10496,6 +10447,8 @@ def test_update_dataset_rest(request_type): "encryption_spec": {"kms_key_name": "kms_key_name_value"}, "metadata_artifact": "metadata_artifact_value", "model_reference": "model_reference_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -10578,6 +10531,8 @@ def get_message_fields(field): etag="etag_value", metadata_artifact="metadata_artifact_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -10601,6 +10556,8 @@ def get_message_fields(field): assert response.etag == "etag_value" assert response.metadata_artifact == "metadata_artifact_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_dataset_rest_use_cached_wrapped_rpc(): @@ -10706,7 +10663,7 @@ def test_update_dataset_rest_required_fields( response = client.update_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11031,7 +10988,7 @@ def test_list_datasets_rest_required_fields( response = client.list_datasets(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11398,7 +11355,7 @@ def test_delete_dataset_rest_required_fields( response = client.delete_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11698,7 +11655,7 @@ def test_import_data_rest_required_fields( response = client.import_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12012,7 +11969,7 @@ def test_export_data_rest_required_fields( response = client.export_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12219,6 +12176,8 @@ def test_create_dataset_version_rest(request_type): "list_value": {"values": {}}, }, "model_reference": "model_reference_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -12421,7 +12380,7 @@ def test_create_dataset_version_rest_required_fields( response = client.create_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12626,6 +12585,8 @@ def test_update_dataset_version_rest(request_type): "list_value": {"values": {}}, }, "model_reference": "model_reference_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -12707,6 +12668,8 @@ def get_message_fields(field): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -12727,6 +12690,8 @@ def get_message_fields(field): assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_dataset_version_rest_use_cached_wrapped_rpc(): @@ -12837,7 +12802,7 @@ def test_update_dataset_version_rest_required_fields( response = client.update_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13161,7 +13126,7 @@ def test_delete_dataset_version_rest_required_fields( response = client.delete_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13351,6 +13316,8 @@ def test_get_dataset_version_rest(request_type): big_query_dataset_name="big_query_dataset_name_value", display_name="display_name_value", model_reference="model_reference_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -13371,6 +13338,8 @@ def test_get_dataset_version_rest(request_type): assert response.big_query_dataset_name == "big_query_dataset_name_value" assert response.display_name == "display_name_value" assert response.model_reference == "model_reference_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def 
test_get_dataset_version_rest_use_cached_wrapped_rpc(): @@ -13484,7 +13453,7 @@ def test_get_dataset_version_rest_required_fields( response = client.get_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13806,7 +13775,7 @@ def test_list_dataset_versions_rest_required_fields( response = client.list_dataset_versions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14184,7 +14153,7 @@ def test_restore_dataset_version_rest_required_fields( response = client.restore_dataset_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14501,7 +14470,7 @@ def test_list_data_items_rest_required_fields( response = client.list_data_items(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14895,7 +14864,7 @@ def test_search_data_items_rest_required_fields( response = client.search_data_items(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15232,7 +15201,7 @@ def test_list_saved_queries_rest_required_fields( response = client.list_saved_queries(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15609,7 +15578,7 @@ def test_delete_saved_query_rest_required_fields( response = client.delete_saved_query(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] 
actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15928,7 +15897,7 @@ def test_get_annotation_spec_rest_required_fields( response = client.get_annotation_spec(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16249,7 +16218,7 @@ def test_list_annotations_rest_required_fields( response = client.list_annotations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py index 07b07d4981..e0ceb095a0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py @@ -1428,12 +1428,7 @@ async def test_create_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_deployment_resource_pool ] = mock_object @@ -1703,6 +1698,8 @@ def test_get_deployment_resource_pool(request_type, transport: str = "grpc"): name="name_value", service_account="service_account_value", disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_deployment_resource_pool(request) @@ -1717,6 +1714,8 @@ def test_get_deployment_resource_pool(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.service_account == "service_account_value" assert response.disable_container_logging is 
True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_deployment_resource_pool_empty_call(): @@ -1834,6 +1833,8 @@ async def test_get_deployment_resource_pool_empty_call_async(): name="name_value", service_account="service_account_value", disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_deployment_resource_pool() @@ -1868,12 +1869,7 @@ async def test_get_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_deployment_resource_pool ] = mock_object @@ -1915,6 +1911,8 @@ async def test_get_deployment_resource_pool_async( name="name_value", service_account="service_account_value", disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_deployment_resource_pool(request) @@ -1930,6 +1928,8 @@ async def test_get_deployment_resource_pool_async( assert response.name == "name_value" assert response.service_account == "service_account_value" assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2277,12 +2277,7 @@ async def test_list_deployment_resource_pools_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_deployment_resource_pools ] = mock_object @@ -2884,12 +2879,7 @@ async def test_update_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached 
wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_deployment_resource_pool ] = mock_object @@ -3307,12 +3297,7 @@ async def test_delete_deployment_resource_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_deployment_resource_pool ] = mock_object @@ -3711,12 +3696,7 @@ async def test_query_deployed_models_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_deployed_models ] = mock_object @@ -4291,7 +4271,7 @@ def test_create_deployment_resource_pool_rest_required_fields( response = client.create_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4499,6 +4479,8 @@ def test_get_deployment_resource_pool_rest(request_type): name="name_value", service_account="service_account_value", disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -4517,6 +4499,8 @@ def test_get_deployment_resource_pool_rest(request_type): assert response.name == "name_value" assert response.service_account == "service_account_value" assert response.disable_container_logging is True + assert response.satisfies_pzs is 
True + assert response.satisfies_pzi is True def test_get_deployment_resource_pool_rest_use_cached_wrapped_rpc(): @@ -4631,7 +4615,7 @@ def test_get_deployment_resource_pool_rest_required_fields( response = client.get_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4969,7 +4953,7 @@ def test_list_deployment_resource_pools_rest_required_fields( response = client.list_deployment_resource_pools(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5253,6 +5237,8 @@ def test_update_deployment_resource_pool_rest(request_type): "service_account": "service_account_value", "disable_container_logging": True, "create_time": {"seconds": 751, "nanos": 543}, + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -5454,7 +5440,7 @@ def test_update_deployment_resource_pool_rest_required_fields( response = client.update_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5789,7 +5775,7 @@ def test_delete_deployment_resource_pool_rest_required_fields( response = client.delete_deployment_resource_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6129,7 +6115,7 @@ def test_query_deployed_models_rest_required_fields( response = client.query_deployed_models(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index c4911f7d86..cfd91e7e2b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -1335,12 +1335,7 @@ async def test_create_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_endpoint ] = mock_object @@ -1589,6 +1584,10 @@ def test_get_endpoint(request_type, transport: str = "grpc"): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + 
dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_endpoint(request) @@ -1610,6 +1609,10 @@ def test_get_endpoint(request_type, transport: str = "grpc"): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_endpoint_empty_call(): @@ -1715,6 +1718,10 @@ async def test_get_endpoint_empty_call_async(): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_endpoint() @@ -1746,12 +1753,7 @@ async def test_get_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_endpoint ] = mock_object @@ -1794,6 +1796,10 @@ async def test_get_endpoint_async( network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_endpoint(request) @@ -1816,6 +1822,10 @@ async def test_get_endpoint_async( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert 
response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2130,12 +2140,7 @@ async def test_list_endpoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_endpoints ] = mock_object @@ -2553,6 +2558,10 @@ def test_update_endpoint(request_type, transport: str = "grpc"): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_endpoint(request) @@ -2574,6 +2583,10 @@ def test_update_endpoint(request_type, transport: str = "grpc"): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_endpoint_empty_call(): @@ -2675,6 +2688,10 @@ async def test_update_endpoint_empty_call_async(): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_endpoint() @@ -2706,12 +2723,7 @@ async def test_update_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_endpoint ] = mock_object @@ -2754,6 +2766,10 @@ async def test_update_endpoint_async( network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_endpoint(request) @@ -2776,6 +2792,10 @@ async def test_update_endpoint_async( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -3099,12 +3119,7 @@ async def test_delete_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_endpoint ] = mock_object @@ -3468,12 +3483,7 @@ async def test_deploy_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.deploy_model ] = mock_object @@ -3895,12 +3905,7 @@ async def test_undeploy_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.undeploy_model ] = mock_object @@ -4297,12 +4302,7 @@ async def test_mutate_deployed_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_deployed_model ] = mock_object @@ -4695,6 +4695,10 @@ def test_create_endpoint_rest(request_type): "sampling_rate": 0.13820000000000002, "bigquery_destination": {"output_uri": "output_uri_value"}, }, + "dedicated_endpoint_enabled": True, + "dedicated_endpoint_dns": "dedicated_endpoint_dns_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -4892,7 +4896,7 @@ def test_create_endpoint_rest_required_fields( response = client.create_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5090,6 +5094,10 @@ def test_get_endpoint_rest(request_type): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -5115,6 +5123,10 @@ def test_get_endpoint_rest(request_type): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True 
+ assert response.satisfies_pzi is True def test_get_endpoint_rest_use_cached_wrapped_rpc(): @@ -5222,7 +5234,7 @@ def test_get_endpoint_rest_required_fields( response = client.get_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5534,7 +5546,7 @@ def test_list_endpoints_rest_required_fields( response = client.list_endpoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5886,6 +5898,10 @@ def test_update_endpoint_rest(request_type): "sampling_rate": 0.13820000000000002, "bigquery_destination": {"output_uri": "output_uri_value"}, }, + "dedicated_endpoint_enabled": True, + "dedicated_endpoint_dns": "dedicated_endpoint_dns_value", + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -5967,6 +5983,10 @@ def get_message_fields(field): network="network_value", enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", + dedicated_endpoint_enabled=True, + dedicated_endpoint_dns="dedicated_endpoint_dns_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -5992,6 +6012,10 @@ def get_message_fields(field): response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" ) + assert response.dedicated_endpoint_enabled is True + assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_endpoint_rest_use_cached_wrapped_rpc(): @@ -6097,7 +6121,7 @@ def test_update_endpoint_rest_required_fields( response = client.update_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6410,7 +6434,7 @@ def test_delete_endpoint_rest_required_fields( response = client.delete_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6712,7 +6736,7 @@ def test_deploy_model_rest_required_fields( response = client.deploy_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7042,7 +7066,7 @@ def test_undeploy_model_rest_required_fields( response = client.undeploy_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7361,7 +7385,7 @@ def 
test_mutate_deployed_model_rest_required_fields( response = client.mutate_deployed_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py index 3b92622e1b..ccd697fbff 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py @@ -1336,12 +1336,7 @@ async def test_evaluate_instances_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.evaluate_instances ] = mock_object @@ -1610,7 +1605,7 @@ def test_evaluate_instances_rest_required_fields( response = client.evaluate_instances(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py index d6981240ca..5715e32b44 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py @@ -1394,12 +1394,7 @@ async def test_execute_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.execute_extension ] = mock_object @@ -1784,12 +1779,7 @@ async def test_query_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_extension ] = mock_object @@ -2156,7 +2146,7 @@ def test_execute_extension_rest_required_fields( response = client.execute_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2481,7 +2471,7 @@ def test_query_extension_rest_required_fields( response = client.query_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py index 9a7ff5be15..15ba4b6825 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py @@ -1386,12 +1386,7 @@ async def test_import_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_extension ] = mock_object @@ -1776,12 +1771,7 @@ async def test_get_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return 
iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_extension ] = mock_object @@ -2154,12 +2144,7 @@ async def test_list_extensions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_extensions ] = mock_object @@ -2721,12 +2706,7 @@ async def test_update_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_extension ] = mock_object @@ -3108,12 +3088,7 @@ async def test_delete_extension_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_extension ] = mock_object @@ -3633,7 +3608,7 @@ def test_import_extension_rest_required_fields( response = client.import_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3953,7 +3928,7 @@ def test_get_extension_rest_required_fields( response = client.get_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4269,7 +4244,7 @@ def 
test_list_extensions_rest_required_fields( response = client.list_extensions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4827,7 +4802,7 @@ def test_update_extension_rest_required_fields( response = client.update_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5145,7 +5120,7 @@ def test_delete_extension_rest_required_fields( response = client.delete_extension(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py index 2f703e87ab..3826036d0c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py @@ -1434,12 +1434,7 @@ async def test_create_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_online_store ] = mock_object @@ -1896,12 +1891,7 @@ async def test_get_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.get_feature_online_store ] = mock_object @@ -2309,12 +2299,7 @@ async def test_list_feature_online_stores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_online_stores ] = mock_object @@ -2914,12 +2899,7 @@ async def test_update_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_online_store ] = mock_object @@ -3361,12 +3341,7 @@ async def test_delete_feature_online_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_online_store ] = mock_object @@ -3765,12 +3740,7 @@ async def test_create_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_view ] = mock_object @@ -4204,12 +4174,7 @@ async def test_get_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - 
mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_view ] = mock_object @@ -4601,12 +4566,7 @@ async def test_list_feature_views_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_views ] = mock_object @@ -5192,12 +5152,7 @@ async def test_update_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_view ] = mock_object @@ -5618,12 +5573,7 @@ async def test_delete_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_view ] = mock_object @@ -6011,12 +5961,7 @@ async def test_sync_feature_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.sync_feature_view ] = mock_object @@ -6406,12 +6351,7 @@ async def test_get_feature_view_sync_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_view_sync ] = mock_object @@ -6813,12 +6753,7 @@ async def test_list_feature_view_syncs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_view_syncs ] = mock_object @@ -7499,6 +7434,7 @@ def test_create_feature_online_store_rest_required_fields( "featureOnlineStoreId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7843,7 +7779,7 @@ def test_get_feature_online_store_rest_required_fields( response = client.get_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8177,7 +8113,7 @@ def test_list_feature_online_stores_rest_required_fields( response = client.list_feature_online_stores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8672,7 +8608,7 @@ def test_update_feature_online_store_rest_required_fields( response = client.update_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9007,7 +8943,7 @@ def test_delete_feature_online_store_rest_required_fields( response = client.delete_feature_online_store(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9458,6 +9394,7 @@ def test_create_feature_view_rest_required_fields( "featureViewId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9807,7 +9744,7 @@ def test_get_feature_view_rest_required_fields( response = client.get_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10137,7 +10074,7 @@ def test_list_feature_views_rest_required_fields( response = client.list_feature_views(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10640,7 +10577,7 @@ def test_update_feature_view_rest_required_fields( response = client.update_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10966,7 +10903,7 @@ def test_delete_feature_view_rest_required_fields( response = client.delete_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11289,7 +11226,7 @@ def test_sync_feature_view_rest_required_fields( response = client.sync_feature_view(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11610,7 +11547,7 @@ def test_get_feature_view_sync_rest_required_fields( response = client.get_feature_view_sync(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11944,7 +11881,7 @@ def test_list_feature_view_syncs_rest_required_fields( response = client.list_feature_view_syncs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py index 846724ce11..72df206de0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py @@ -1388,12 +1388,7 @@ async def test_fetch_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.fetch_feature_values ] = mock_object @@ -1714,12 +1709,7 @@ async def test_streaming_fetch_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_fetch_feature_values ] = mock_object @@ -1956,12 +1946,7 @@ async def test_search_nearest_entities_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_nearest_entities ] = mock_object @@ -2238,7 
+2223,7 @@ def test_fetch_feature_values_rest_required_fields( response = client.fetch_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2576,7 +2561,7 @@ def test_search_nearest_entities_rest_required_fields( response = client.search_nearest_entities(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py index c3893d9463..9bb1af8e00 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py @@ -1394,12 +1394,7 @@ async def test_create_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature_group ] = mock_object @@ -1835,12 +1830,7 @@ async def test_get_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature_group ] = mock_object @@ -2237,12 +2227,7 @@ async def test_list_feature_groups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_feature_groups ] = mock_object @@ -2822,12 +2807,7 @@ async def test_update_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature_group ] = mock_object @@ -3248,12 +3228,7 @@ async def test_delete_feature_group_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_group ] = mock_object @@ -3640,12 +3615,7 @@ async def test_create_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature ] = mock_object @@ -4049,12 +4019,7 @@ async def test_get_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature ] = mock_object @@ -4432,12 +4397,7 @@ async def test_list_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return 
iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_features ] = mock_object @@ -4987,12 +4947,7 @@ async def test_update_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature ] = mock_object @@ -5367,12 +5322,7 @@ async def test_delete_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature ] = mock_object @@ -5818,6 +5768,7 @@ def test_create_feature_group_rest_required_fields( "featureGroupId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6149,7 +6100,7 @@ def test_get_feature_group_rest_required_fields( response = client.get_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6472,7 +6423,7 @@ def test_list_feature_groups_rest_required_fields( response = client.list_feature_groups(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6934,7 +6885,7 @@ def test_update_feature_group_rest_required_fields( response = client.update_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7258,7 +7209,7 @@ def test_delete_feature_group_rest_required_fields( response = client.delete_feature_group(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7681,6 +7632,7 @@ def test_create_feature_rest_required_fields( "featureId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8015,7 +7967,7 @@ def test_get_feature_rest_required_fields( response = client.get_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8333,7 +8285,7 @@ def test_list_features_rest_required_fields( response = client.list_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8811,7 +8763,7 @@ def test_update_feature_rest_required_fields( response = client.update_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9122,7 +9074,7 @@ def test_delete_feature_rest_required_fields( response = client.delete_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py index 8b941bb0e4..5e7e538f44 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ 
b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -1415,12 +1415,7 @@ async def test_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_feature_values ] = mock_object @@ -1810,12 +1805,7 @@ async def test_streaming_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_read_feature_values ] = mock_object @@ -2201,12 +2191,7 @@ async def test_write_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.write_feature_values ] = mock_object @@ -2605,7 +2590,7 @@ def test_read_feature_values_rest_required_fields( response = client.read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2951,7 +2936,7 @@ def test_streaming_read_feature_values_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.streaming_read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3288,7 
+3273,7 @@ def test_write_feature_values_rest_required_fields( response = client.write_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index 285794b653..38eadb21b0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -1394,12 +1394,7 @@ async def test_create_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_featurestore ] = mock_object @@ -1806,12 +1801,7 @@ async def test_get_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_featurestore ] = mock_object @@ -2200,12 +2190,7 @@ async def test_list_featurestores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_featurestores ] = mock_object @@ -2785,12 +2770,7 @@ async def test_update_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): 
- def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_featurestore ] = mock_object @@ -3187,12 +3167,7 @@ async def test_delete_featurestore_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_featurestore ] = mock_object @@ -3591,12 +3566,7 @@ async def test_create_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_entity_type ] = mock_object @@ -4001,12 +3971,7 @@ async def test_get_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_entity_type ] = mock_object @@ -4393,12 +4358,7 @@ async def test_list_entity_types_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_entity_types ] = mock_object @@ -4988,12 +4948,7 @@ async def test_update_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with 
mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_entity_type ] = mock_object @@ -5395,12 +5350,7 @@ async def test_delete_entity_type_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_entity_type ] = mock_object @@ -5787,12 +5737,7 @@ async def test_create_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_feature ] = mock_object @@ -6190,12 +6135,7 @@ async def test_batch_create_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_features ] = mock_object @@ -6599,12 +6539,7 @@ async def test_get_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_feature ] = mock_object @@ -6982,12 +6917,7 @@ async def test_list_features_async_use_cached_wrapped_rpc( ) # Replace 
cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_features ] = mock_object @@ -7556,12 +7486,7 @@ async def test_update_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_feature ] = mock_object @@ -7943,12 +7868,7 @@ async def test_delete_feature_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature ] = mock_object @@ -8330,12 +8250,7 @@ async def test_import_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_feature_values ] = mock_object @@ -8723,12 +8638,7 @@ async def test_batch_read_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_read_feature_values ] = mock_object @@ -9116,12 +9026,7 @@ async def 
test_export_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_feature_values ] = mock_object @@ -9509,12 +9414,7 @@ async def test_delete_feature_values_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_feature_values ] = mock_object @@ -9894,12 +9794,7 @@ async def test_search_features_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_features ] = mock_object @@ -10547,6 +10442,7 @@ def test_create_featurestore_rest_required_fields( "featurestoreId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10871,7 +10767,7 @@ def test_get_featurestore_rest_required_fields( response = client.get_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11192,7 +11088,7 @@ def test_list_featurestores_rest_required_fields( response = client.list_featurestores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ 
-11654,7 +11550,7 @@ def test_update_featurestore_rest_required_fields( response = client.update_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11969,7 +11865,7 @@ def test_delete_featurestore_rest_required_fields( response = client.delete_featurestore(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12380,6 +12276,7 @@ def test_create_entity_type_rest_required_fields( "entityTypeId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12707,7 +12604,7 @@ def test_get_entity_type_rest_required_fields( response = client.get_entity_type(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13028,7 +12925,7 @@ def test_list_entity_types_rest_required_fields( response = client.list_entity_types(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13506,7 +13403,7 @@ def test_update_entity_type_rest_required_fields( response = client.update_entity_type(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13823,7 +13720,7 @@ def test_delete_entity_type_rest_required_fields( response = client.delete_entity_type(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14247,6 +14144,7 @@ def 
test_create_feature_rest_required_fields( "featureId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14571,7 +14469,7 @@ def test_batch_create_features_rest_required_fields( response = client.batch_create_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14903,7 +14801,7 @@ def test_get_feature_rest_required_fields( response = client.get_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15221,7 +15119,7 @@ def test_list_features_rest_required_fields( response = client.list_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15715,7 +15613,7 @@ def test_update_feature_rest_required_fields( response = client.update_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16024,7 +15922,7 @@ def test_delete_feature_rest_required_fields( response = client.delete_feature(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16335,7 +16233,7 @@ def test_import_feature_values_rest_required_fields( response = client.import_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16655,7 +16553,7 @@ def test_batch_read_feature_values_rest_required_fields( response = 
client.batch_read_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16976,7 +16874,7 @@ def test_export_feature_values_rest_required_fields( response = client.export_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17297,7 +17195,7 @@ def test_delete_feature_values_rest_required_fields( response = client.delete_feature_values(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17613,7 +17511,7 @@ def test_search_features_rest_required_fields( response = client.search_features(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 0d4bc4f537..c8d5a71a86 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -1199,6 +1199,7 @@ def test_create_cached_content(request_type, transport: str = "grpc"): # Designate an appropriate return value for the call. call.return_value = gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) response = client.create_cached_content(request) @@ -1212,6 +1213,7 @@ def test_create_cached_content(request_type, transport: str = "grpc"): # Establish that the response is the type that we expect. 
assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -1323,6 +1325,7 @@ async def test_create_cached_content_empty_call_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -1355,12 +1358,7 @@ async def test_create_cached_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_cached_content ] = mock_object @@ -1400,6 +1398,7 @@ async def test_create_cached_content_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -1414,6 +1413,7 @@ async def test_create_cached_content_async( # Establish that the response is the type that we expect. assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -1619,6 +1619,7 @@ def test_get_cached_content(request_type, transport: str = "grpc"): # Designate an appropriate return value for the call. call.return_value = cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) response = client.get_cached_content(request) @@ -1632,6 +1633,7 @@ def test_get_cached_content(request_type, transport: str = "grpc"): # Establish that the response is the type that we expect. 
assert isinstance(response, cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -1742,6 +1744,7 @@ async def test_get_cached_content_empty_call_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -1774,12 +1777,7 @@ async def test_get_cached_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_cached_content ] = mock_object @@ -1819,6 +1817,7 @@ async def test_get_cached_content_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -1833,6 +1832,7 @@ async def test_get_cached_content_async( # Establish that the response is the type that we expect. assert isinstance(response, cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -2016,6 +2016,7 @@ def test_update_cached_content(request_type, transport: str = "grpc"): # Designate an appropriate return value for the call. call.return_value = gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) response = client.update_cached_content(request) @@ -2029,6 +2030,7 @@ def test_update_cached_content(request_type, transport: str = "grpc"): # Establish that the response is the type that we expect. 
assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -2136,6 +2138,7 @@ async def test_update_cached_content_empty_call_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -2168,12 +2171,7 @@ async def test_update_cached_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_cached_content ] = mock_object @@ -2213,6 +2211,7 @@ async def test_update_cached_content_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) ) @@ -2227,6 +2226,7 @@ async def test_update_cached_content_async( # Establish that the response is the type that we expect. 
assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -2578,12 +2578,7 @@ async def test_delete_cached_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_cached_content ] = mock_object @@ -2963,12 +2958,7 @@ async def test_list_cached_contents_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_cached_contents ] = mock_object @@ -3396,6 +3386,7 @@ def test_create_cached_content_rest(request_type): "expire_time": {"seconds": 751, "nanos": 543}, "ttl": {"seconds": 751, "nanos": 543}, "name": "name_value", + "display_name": "display_name_value", "model": "model_value", "system_instruction": { "role": "role_value", @@ -3564,6 +3555,7 @@ def get_message_fields(field): # Designate an appropriate value for the returned response. return_value = gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) @@ -3581,6 +3573,7 @@ def get_message_fields(field): # Establish that the response is the type that we expect. 
assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -3695,7 +3688,7 @@ def test_create_cached_content_rest_required_fields( response = client.create_cached_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3890,6 +3883,7 @@ def test_get_cached_content_rest(request_type): # Designate an appropriate value for the returned response. return_value = cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) @@ -3907,6 +3901,7 @@ def test_get_cached_content_rest(request_type): # Establish that the response is the type that we expect. assert isinstance(response, cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -4019,7 +4014,7 @@ def test_get_cached_content_rest_required_fields( response = client.get_cached_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4202,6 +4197,7 @@ def test_update_cached_content_rest(request_type): "expire_time": {"seconds": 751, "nanos": 543}, "ttl": {"seconds": 751, "nanos": 543}, "name": "projects/sample1/locations/sample2/cachedContents/sample3", + "display_name": "display_name_value", "model": "model_value", "system_instruction": { "role": "role_value", @@ -4370,6 +4366,7 @@ def get_message_fields(field): # Designate an appropriate value for the returned response. 
return_value = gca_cached_content.CachedContent( name="name_value", + display_name="display_name_value", model="model_value", ) @@ -4387,6 +4384,7 @@ def get_message_fields(field): # Establish that the response is the type that we expect. assert isinstance(response, gca_cached_content.CachedContent) assert response.name == "name_value" + assert response.display_name == "display_name_value" assert response.model == "model_value" @@ -4498,7 +4496,7 @@ def test_update_cached_content_rest_required_fields( response = client.update_cached_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4821,7 +4819,7 @@ def test_delete_cached_content_rest_required_fields( response = client.delete_cached_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5130,7 +5128,7 @@ def test_list_cached_contents_rest_required_fields( response = client.list_cached_contents(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py index 49ba71d4c9..342935faf4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py @@ -1209,6 +1209,7 @@ def test_create_tuning_job(request_type, transport: str = "grpc"): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", base_model="base_model_value", ) response = client.create_tuning_job(request) @@ -1226,6 +1227,7 @@ def 
test_create_tuning_job(request_type, transport: str = "grpc"): assert response.description == "description_value" assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" def test_create_tuning_job_empty_call(): @@ -1337,6 +1339,7 @@ async def test_create_tuning_job_empty_call_async(): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", ) ) response = await client.create_tuning_job() @@ -1368,12 +1371,7 @@ async def test_create_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tuning_job ] = mock_object @@ -1417,6 +1415,7 @@ async def test_create_tuning_job_async( description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", ) ) response = await client.create_tuning_job(request) @@ -1434,6 +1433,7 @@ async def test_create_tuning_job_async( assert response.description == "description_value" assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" @pytest.mark.asyncio @@ -1628,6 +1628,7 @@ def test_get_tuning_job(request_type, transport: str = "grpc"): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", base_model="base_model_value", ) response = client.get_tuning_job(request) @@ -1645,6 +1646,7 @@ def test_get_tuning_job(request_type, transport: str = "grpc"): assert response.description == "description_value" 
assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" def test_get_tuning_job_empty_call(): @@ -1748,6 +1750,7 @@ async def test_get_tuning_job_empty_call_async(): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", ) ) response = await client.get_tuning_job() @@ -1779,12 +1782,7 @@ async def test_get_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tuning_job ] = mock_object @@ -1826,6 +1824,7 @@ async def test_get_tuning_job_async( description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", ) ) response = await client.get_tuning_job(request) @@ -1843,6 +1842,7 @@ async def test_get_tuning_job_async( assert response.description == "description_value" assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" @pytest.mark.asyncio @@ -2163,12 +2163,7 @@ async def test_list_tuning_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tuning_jobs ] = mock_object @@ -2726,12 +2721,7 @@ async def test_cancel_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_tuning_job ] = mock_object @@ -2959,6 +2949,19 @@ def test_create_tuning_job_rest(request_type): "adapter_size": 1, }, }, + "distillation_spec": { + "base_teacher_model": "base_teacher_model_value", + "tuned_teacher_model_source": "tuned_teacher_model_source_value", + "training_dataset_uri": "training_dataset_uri_value", + "validation_dataset_uri": "validation_dataset_uri_value", + "hyper_parameters": { + "epoch_count": 1175, + "learning_rate_multiplier": 0.2561, + "adapter_size": 1, + }, + "student_model": "student_model_value", + "pipeline_root_directory": "pipeline_root_directory_value", + }, "name": "name_value", "tuned_model_display_name": "tuned_model_display_name_value", "description": "description_value", @@ -2985,9 +2988,11 @@ def test_create_tuning_job_rest(request_type): "tuning_dataset_example_count": 2989, "total_tuning_character_count": 2988, "total_billable_character_count": 3150, + "total_billable_token_count": 2754, "tuning_step_count": 1848, "user_input_token_distribution": { "sum": 341, + "billable_sum": 1259, "min_": 0.419, "max_": 0.421, "mean": 0.417, @@ -3028,8 +3033,30 @@ def test_create_tuning_job_rest(request_type): ], } ], - } + }, + "distillation_data_stats": { + "training_dataset_stats": { + "tuning_dataset_example_count": 2989, + "total_tuning_character_count": 2988, + "total_billable_character_count": 3150, + "tuning_step_count": 1848, + "user_input_token_distribution": { + "sum": 0.341, + "min_": 0.419, + "max_": 0.421, + "mean": 0.417, + "median": 0.622, + "p5": 0.165, + "p95": 0.222, + "buckets": [{"count": 553, "left": 0.427, "right": 0.542}], + }, + "user_output_token_distribution": {}, + "user_message_per_example_distribution": {}, + "user_dataset_examples": {}, + } + }, }, + "pipeline_job": "pipeline_job_value", 
"encryption_spec": {"kms_key_name": "kms_key_name_value"}, } # The version of a generated dependency at test runtime may differ from the version used during generation. @@ -3110,6 +3137,7 @@ def get_message_fields(field): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", base_model="base_model_value", ) @@ -3131,6 +3159,7 @@ def get_message_fields(field): assert response.description == "description_value" assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" def test_create_tuning_job_rest_use_cached_wrapped_rpc(): @@ -3241,7 +3270,7 @@ def test_create_tuning_job_rest_required_fields( response = client.create_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3435,6 +3464,7 @@ def test_get_tuning_job_rest(request_type): description="description_value", state=job_state.JobState.JOB_STATE_QUEUED, experiment="experiment_value", + pipeline_job="pipeline_job_value", base_model="base_model_value", ) @@ -3456,6 +3486,7 @@ def test_get_tuning_job_rest(request_type): assert response.description == "description_value" assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.experiment == "experiment_value" + assert response.pipeline_job == "pipeline_job_value" def test_get_tuning_job_rest_use_cached_wrapped_rpc(): @@ -3563,7 +3594,7 @@ def test_get_tuning_job_rest_required_fields( response = client.get_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3876,7 +3907,7 @@ def test_list_tuning_jobs_rest_required_fields( response = client.list_tuning_jobs(request) - expected_params = 
[] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4240,7 +4271,7 @@ def test_cancel_tuning_job_rest_required_fields( response = client.cancel_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5026,10 +5057,38 @@ def test_parse_model_path(): assert expected == actual -def test_tuning_job_path(): +def test_pipeline_job_path(): project = "winkle" location = "nautilus" - tuning_job = "scallop" + pipeline_job = "scallop" + expected = ( + "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, + location=location, + pipeline_job=pipeline_job, + ) + ) + actual = GenAiTuningServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "abalone", + "location": "squid", + "pipeline_job": "clam", + } + path = GenAiTuningServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenAiTuningServiceClient.parse_pipeline_job_path(path) + assert expected == actual + + +def test_tuning_job_path(): + project = "whelk" + location = "octopus" + tuning_job = "oyster" expected = "projects/{project}/locations/{location}/tuningJobs/{tuning_job}".format( project=project, location=location, @@ -5041,9 +5100,9 @@ def test_tuning_job_path(): def test_parse_tuning_job_path(): expected = { - "project": "abalone", - "location": "squid", - "tuning_job": "clam", + "project": "nudibranch", + "location": "cuttlefish", + "tuning_job": "mussel", } path = GenAiTuningServiceClient.tuning_job_path(**expected) @@ -5053,7 +5112,7 @@ def test_parse_tuning_job_path(): def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "winkle" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -5063,7 +5122,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "nautilus", } path = GenAiTuningServiceClient.common_billing_account_path(**expected) @@ -5073,7 +5132,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "scallop" expected = "folders/{folder}".format( folder=folder, ) @@ -5083,7 +5142,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "abalone", } path = GenAiTuningServiceClient.common_folder_path(**expected) @@ -5093,7 +5152,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "squid" expected = "organizations/{organization}".format( organization=organization, ) @@ -5103,7 +5162,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "clam", } path = 
GenAiTuningServiceClient.common_organization_path(**expected) @@ -5113,7 +5172,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "whelk" expected = "projects/{project}".format( project=project, ) @@ -5123,7 +5182,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "octopus", } path = GenAiTuningServiceClient.common_project_path(**expected) @@ -5133,8 +5192,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + project = "oyster" + location = "nudibranch" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -5145,8 +5204,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": "clam", + "project": "cuttlefish", + "location": "mussel", } path = GenAiTuningServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index 9dcadbdcd4..fd59b09017 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -1386,12 +1386,7 @@ async def test_create_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_index_endpoint ] = mock_object @@ -1810,12 +1805,7 @@ async def test_get_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_index_endpoint ] = mock_object @@ -2220,12 +2210,7 @@ async def test_list_index_endpoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_index_endpoints ] = mock_object @@ -2828,12 +2813,7 @@ async def test_update_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_index_endpoint ] = mock_object @@ -3244,12 +3224,7 @@ async def test_delete_index_endpoint_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_index_endpoint ] = mock_object @@ -3624,12 +3599,7 @@ async def test_deploy_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.deploy_index ] = mock_object @@ -4006,12 +3976,7 @@ async def test_undeploy_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - 
def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.undeploy_index ] = mock_object @@ -4399,12 +4364,7 @@ async def test_mutate_deployed_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_deployed_index ] = mock_object @@ -4915,7 +4875,7 @@ def test_create_index_endpoint_rest_required_fields( response = client.create_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5247,7 +5207,7 @@ def test_get_index_endpoint_rest_required_fields( response = client.get_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5569,7 +5529,7 @@ def test_list_index_endpoints_rest_required_fields( response = client.list_index_endpoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6108,7 +6068,7 @@ def test_update_index_endpoint_rest_required_fields( response = client.update_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6431,7 +6391,7 @@ def test_delete_index_endpoint_rest_required_fields( response = client.delete_index_endpoint(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6736,7 +6696,7 @@ def test_deploy_index_rest_required_fields( response = client.deploy_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7056,7 +7016,7 @@ def test_undeploy_index_rest_required_fields( response = client.undeploy_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7490,7 +7450,7 @@ def test_mutate_deployed_index_rest_required_fields( response = client.mutate_deployed_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py index 88a97d8897..74a1f9afd3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -1279,12 +1279,7 @@ async def test_create_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_index ] = mock_object @@ -1672,12 +1667,7 @@ async def test_get_index_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.get_index ] = mock_object @@ -2051,12 +2041,7 @@ async def test_list_indexes_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_indexes ] = mock_object @@ -2605,12 +2590,7 @@ async def test_update_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_index ] = mock_object @@ -2984,12 +2964,7 @@ async def test_delete_index_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_index ] = mock_object @@ -3359,12 +3334,7 @@ async def test_upsert_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upsert_datapoints ] = mock_object @@ -3654,12 +3624,7 @@ async def test_remove_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.remove_datapoints ] = mock_object @@ -4021,7 +3986,7 @@ def test_create_index_rest_required_fields( response = client.create_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4342,7 +4307,7 @@ def test_get_index_rest_required_fields(request_type=index_service.GetIndexReque response = client.get_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4650,7 +4615,7 @@ def test_list_indexes_rest_required_fields( response = client.list_indexes(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5113,7 +5078,7 @@ def test_update_index_rest_required_fields( response = client.update_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5418,7 +5383,7 @@ def test_delete_index_rest_required_fields( response = client.delete_index(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5721,7 +5686,7 @@ def test_upsert_datapoints_rest_required_fields( response = client.upsert_datapoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5967,7 +5932,7 @@ def test_remove_datapoints_rest_required_fields( response = client.remove_datapoints(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = 
req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index b8c1a9f647..aae3afafa4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -1315,12 +1315,7 @@ async def test_create_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_custom_job ] = mock_object @@ -1714,12 +1709,7 @@ async def test_get_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_custom_job ] = mock_object @@ -2093,12 +2083,7 @@ async def test_list_custom_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_custom_jobs ] = mock_object @@ -2661,12 +2646,7 @@ async def test_delete_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_custom_job ] = mock_object @@ -3044,12 +3024,7 
@@ async def test_cancel_custom_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_custom_job ] = mock_object @@ -3451,12 +3426,7 @@ async def test_create_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_data_labeling_job ] = mock_object @@ -3894,12 +3864,7 @@ async def test_get_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_data_labeling_job ] = mock_object @@ -4308,12 +4273,7 @@ async def test_list_data_labeling_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_data_labeling_jobs ] = mock_object @@ -4898,12 +4858,7 @@ async def test_delete_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.delete_data_labeling_job ] = mock_object @@ -5285,12 +5240,7 @@ async def test_cancel_data_labeling_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_data_labeling_job ] = mock_object @@ -5684,12 +5634,7 @@ async def test_create_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_hyperparameter_tuning_job ] = mock_object @@ -6124,12 +6069,7 @@ async def test_get_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_hyperparameter_tuning_job ] = mock_object @@ -6531,12 +6471,7 @@ async def test_list_hyperparameter_tuning_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_hyperparameter_tuning_jobs ] = mock_object @@ -7127,12 +7062,7 @@ async def test_delete_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): 
- self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_hyperparameter_tuning_job ] = mock_object @@ -7514,12 +7444,7 @@ async def test_cancel_hyperparameter_tuning_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_hyperparameter_tuning_job ] = mock_object @@ -7894,12 +7819,7 @@ async def test_create_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_nas_job ] = mock_object @@ -8284,12 +8204,7 @@ async def test_get_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_nas_job ] = mock_object @@ -8659,12 +8574,7 @@ async def test_list_nas_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_nas_jobs ] = mock_object @@ -9217,12 +9127,7 @@ async def test_delete_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_nas_job ] = mock_object @@ -9580,12 +9485,7 @@ async def test_cancel_nas_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_nas_job ] = mock_object @@ -9955,12 +9855,7 @@ async def test_get_nas_trial_detail_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_nas_trial_detail ] = mock_object @@ -10351,12 +10246,7 @@ async def test_list_nas_trial_details_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_nas_trial_details ] = mock_object @@ -10795,6 +10685,8 @@ def test_create_batch_prediction_job(request_type, transport: str = "grpc"): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.create_batch_prediction_job(request) @@ -10814,6 +10706,8 @@ def test_create_batch_prediction_job(request_type, transport: str = "grpc"): assert response.generate_explanation is True assert response.state == 
job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_create_batch_prediction_job_empty_call(): @@ -10931,6 +10825,8 @@ async def test_create_batch_prediction_job_empty_call_async(): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.create_batch_prediction_job() @@ -10962,12 +10858,7 @@ async def test_create_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_batch_prediction_job ] = mock_object @@ -11014,6 +10905,8 @@ async def test_create_batch_prediction_job_async( generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.create_batch_prediction_job(request) @@ -11034,6 +10927,8 @@ async def test_create_batch_prediction_job_async( assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -11241,6 +11136,8 @@ def test_get_batch_prediction_job(request_type, transport: str = "grpc"): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_batch_prediction_job(request) @@ -11260,6 +11157,8 @@ def test_get_batch_prediction_job(request_type, transport: str = "grpc"): assert response.generate_explanation is True 
assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_batch_prediction_job_empty_call(): @@ -11377,6 +11276,8 @@ async def test_get_batch_prediction_job_empty_call_async(): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_batch_prediction_job() @@ -11408,12 +11309,7 @@ async def test_get_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_batch_prediction_job ] = mock_object @@ -11460,6 +11356,8 @@ async def test_get_batch_prediction_job_async( generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_batch_prediction_job(request) @@ -11480,6 +11378,8 @@ async def test_get_batch_prediction_job_async( assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -11819,12 +11719,7 @@ async def test_list_batch_prediction_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_batch_prediction_jobs ] = mock_object @@ -12413,12 
+12308,7 @@ async def test_delete_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_batch_prediction_job ] = mock_object @@ -12800,12 +12690,7 @@ async def test_cancel_batch_prediction_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_batch_prediction_job ] = mock_object @@ -13210,12 +13095,7 @@ async def test_create_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_model_deployment_monitoring_job ] = mock_object @@ -13674,12 +13554,7 @@ async def test_search_model_deployment_monitoring_stats_anomalies_async_use_cach ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_model_deployment_monitoring_stats_anomalies ] = mock_object @@ -14338,12 +14213,7 @@ async def test_get_model_deployment_monitoring_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return 
iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_deployment_monitoring_job ] = mock_object @@ -14760,12 +14630,7 @@ async def test_list_model_deployment_monitoring_jobs_async_use_cached_wrapped_rp ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_deployment_monitoring_jobs ] = mock_object @@ -15352,12 +15217,7 @@ async def test_update_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_model_deployment_monitoring_job ] = mock_object @@ -15767,12 +15627,7 @@ async def test_delete_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model_deployment_monitoring_job ] = mock_object @@ -16154,12 +16009,7 @@ async def test_pause_model_deployment_monitoring_job_async_use_cached_wrapped_rp ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.pause_model_deployment_monitoring_job ] = mock_object @@ 
-16531,12 +16381,7 @@ async def test_resume_model_deployment_monitoring_job_async_use_cached_wrapped_r ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.resume_model_deployment_monitoring_job ] = mock_object @@ -16796,6 +16641,7 @@ def test_create_custom_job_rest(request_type): "scheduling": { "timeout": {"seconds": 751, "nanos": 543}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": "service_account_value", @@ -17036,7 +16882,7 @@ def test_create_custom_job_rest_required_fields( response = client.create_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17353,7 +17199,7 @@ def test_get_custom_job_rest_required_fields( response = client.get_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17667,7 +17513,7 @@ def test_list_custom_jobs_rest_required_fields( response = client.list_custom_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18035,7 +17881,7 @@ def test_delete_custom_job_rest_required_fields( response = client.delete_custom_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18335,7 +18181,7 @@ def test_cancel_custom_job_rest_required_fields( response = client.cancel_custom_job(request) - expected_params = [] + expected_params = 
[("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18770,7 +18616,7 @@ def test_create_data_labeling_job_rest_required_fields( response = client.create_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19106,7 +18952,7 @@ def test_get_data_labeling_job_rest_required_fields( response = client.get_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19428,7 +19274,7 @@ def test_list_data_labeling_jobs_rest_required_fields( response = client.list_data_labeling_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19802,7 +19648,7 @@ def test_delete_data_labeling_job_rest_required_fields( response = client.delete_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20109,7 +19955,7 @@ def test_cancel_data_labeling_job_rest_required_fields( response = client.cancel_data_labeling_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20407,6 +20253,7 @@ def test_create_hyperparameter_tuning_job_rest(request_type): "scheduling": { "timeout": {}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": "service_account_value", @@ -20697,7 +20544,7 @@ def test_create_hyperparameter_tuning_job_rest_required_fields( response = 
client.create_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21042,7 +20889,7 @@ def test_get_hyperparameter_tuning_job_rest_required_fields( response = client.get_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21371,7 +21218,7 @@ def test_list_hyperparameter_tuning_jobs_rest_required_fields( response = client.list_hyperparameter_tuning_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21755,7 +21602,7 @@ def test_delete_hyperparameter_tuning_job_rest_required_fields( response = client.delete_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22065,7 +21912,7 @@ def test_cancel_hyperparameter_tuning_job_rest_required_fields( response = client.cancel_hyperparameter_tuning_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22288,6 +22135,7 @@ def test_create_nas_job_rest(request_type): "scheduling": { "timeout": {"seconds": 751, "nanos": 543}, "restart_job_on_worker_restart": True, + "strategy": 1, "disable_retries": True, }, "service_account": "service_account_value", @@ -22563,7 +22411,7 @@ def test_create_nas_job_rest_required_fields( response = client.create_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == 
actual_params @@ -22878,7 +22726,7 @@ def test_get_nas_job_rest_required_fields(request_type=job_service.GetNasJobRequ response = client.get_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23186,7 +23034,7 @@ def test_list_nas_jobs_rest_required_fields( response = client.list_nas_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23548,7 +23396,7 @@ def test_delete_nas_job_rest_required_fields( response = client.delete_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23844,7 +23692,7 @@ def test_cancel_nas_job_rest_required_fields( response = client.cancel_nas_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24145,7 +23993,7 @@ def test_get_nas_trial_detail_rest_required_fields( response = client.get_nas_trial_detail(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24464,7 +24312,7 @@ def test_list_nas_trial_details_rest_required_fields( response = client.list_nas_trial_details(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24911,6 +24759,8 @@ def test_create_batch_prediction_job_rest(request_type): ], "model_monitoring_status": {}, "disable_container_logging": True, + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test 
runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -24997,6 +24847,8 @@ def get_message_fields(field): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -25020,6 +24872,8 @@ def get_message_fields(field): assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_create_batch_prediction_job_rest_use_cached_wrapped_rpc(): @@ -25133,7 +24987,7 @@ def test_create_batch_prediction_job_rest_required_fields( response = client.create_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25336,6 +25190,8 @@ def test_get_batch_prediction_job_rest(request_type): generate_explanation=True, state=job_state.JobState.JOB_STATE_QUEUED, disable_container_logging=True, + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -25359,6 +25215,8 @@ def test_get_batch_prediction_job_rest(request_type): assert response.generate_explanation is True assert response.state == job_state.JobState.JOB_STATE_QUEUED assert response.disable_container_logging is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_batch_prediction_job_rest_use_cached_wrapped_rpc(): @@ -25471,7 +25329,7 @@ def test_get_batch_prediction_job_rest_required_fields( response = client.get_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ 
-25792,7 +25650,7 @@ def test_list_batch_prediction_jobs_rest_required_fields( response = client.list_batch_prediction_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26167,7 +26025,7 @@ def test_delete_batch_prediction_job_rest_required_fields( response = client.delete_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26474,7 +26332,7 @@ def test_cancel_batch_prediction_job_rest_required_fields( response = client.cancel_batch_prediction_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -26980,7 +26838,7 @@ def test_create_model_deployment_monitoring_job_rest_required_fields( response = client.create_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -27349,7 +27207,7 @@ def test_search_model_deployment_monitoring_stats_anomalies_rest_required_fields request ) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -27793,7 +27651,7 @@ def test_get_model_deployment_monitoring_job_rest_required_fields( response = client.get_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -28133,7 +27991,7 @@ def test_list_model_deployment_monitoring_jobs_rest_required_fields( response = client.list_model_deployment_monitoring_jobs(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -28699,7 +28557,7 @@ def test_update_model_deployment_monitoring_job_rest_required_fields( response = client.update_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -29038,7 +28896,7 @@ def test_delete_model_deployment_monitoring_job_rest_required_fields( response = client.delete_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -29356,7 +29214,7 @@ def test_pause_model_deployment_monitoring_job_rest_required_fields( response = client.pause_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -29663,7 +29521,7 @@ def test_resume_model_deployment_monitoring_job_rest_required_fields( response = client.resume_model_deployment_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py index 8469994d95..79d565c295 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py @@ -1330,12 +1330,7 @@ async def test_compute_tokens_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count 
+= 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.compute_tokens ] = mock_object @@ -1686,7 +1681,7 @@ def test_compute_tokens_rest_required_fields( response = client.compute_tokens(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py index 6263538415..4dfe0eccb3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py @@ -1264,12 +1264,7 @@ async def test_find_neighbors_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.find_neighbors ] = mock_object @@ -1558,12 +1553,7 @@ async def test_read_index_datapoints_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_index_datapoints ] = mock_object @@ -1830,7 +1820,7 @@ def test_find_neighbors_rest_required_fields( response = client.find_neighbors(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2083,7 +2073,7 @@ def test_read_index_datapoints_rest_required_fields( response = client.read_index_datapoints(request) - expected_params = 
[] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py index 931223632a..bfeb2fdfba 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -1352,12 +1352,7 @@ async def test_create_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_metadata_store ] = mock_object @@ -1768,12 +1763,7 @@ async def test_get_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_metadata_store ] = mock_object @@ -2164,12 +2154,7 @@ async def test_list_metadata_stores_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_metadata_stores ] = mock_object @@ -2754,12 +2739,7 @@ async def test_delete_metadata_store_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.delete_metadata_store ] = mock_object @@ -3158,12 +3138,7 @@ async def test_create_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_artifact ] = mock_object @@ -3582,12 +3557,7 @@ async def test_get_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_artifact ] = mock_object @@ -3967,12 +3937,7 @@ async def test_list_artifacts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_artifacts ] = mock_object @@ -4543,12 +4508,7 @@ async def test_update_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_artifact ] = mock_object @@ -4937,12 +4897,7 @@ async def test_delete_artifact_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_artifact ] = mock_object @@ -5308,12 +5263,7 @@ async def test_purge_artifacts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_artifacts ] = mock_object @@ -5698,12 +5648,7 @@ async def test_create_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_context ] = mock_object @@ -6113,12 +6058,7 @@ async def test_get_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_context ] = mock_object @@ -6496,12 +6436,7 @@ async def test_list_contexts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_contexts ] = mock_object @@ -7069,12 +7004,7 @@ async def test_update_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_context ] = mock_object @@ -7457,12 +7387,7 @@ async def test_delete_context_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_context ] = mock_object @@ -7828,12 +7753,7 @@ async def test_purge_contexts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_contexts ] = mock_object @@ -8208,12 +8128,7 @@ async def test_add_context_artifacts_and_executions_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_context_artifacts_and_executions ] = mock_object @@ -8614,12 +8529,7 @@ async def test_add_context_children_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_context_children ] = mock_object @@ -9009,12 +8919,7 @@ async def test_remove_context_children_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 
1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.remove_context_children ] = mock_object @@ -9404,12 +9309,7 @@ async def test_query_context_lineage_subgraph_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_context_lineage_subgraph ] = mock_object @@ -9803,12 +9703,7 @@ async def test_create_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_execution ] = mock_object @@ -10223,12 +10118,7 @@ async def test_get_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_execution ] = mock_object @@ -10606,12 +10496,7 @@ async def test_list_executions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_executions ] = mock_object @@ -11181,12 +11066,7 @@ async def test_update_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_execution ] = mock_object @@ -11576,12 +11456,7 @@ async def test_delete_execution_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_execution ] = mock_object @@ -11950,12 +11825,7 @@ async def test_purge_executions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.purge_executions ] = mock_object @@ -12328,12 +12198,7 @@ async def test_add_execution_events_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_execution_events ] = mock_object @@ -12723,12 +12588,7 @@ async def test_query_execution_inputs_and_outputs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_execution_inputs_and_outputs ] = mock_object @@ -13130,12 +12990,7 @@ async def 
test_create_metadata_schema_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_metadata_schema ] = mock_object @@ -13568,12 +13423,7 @@ async def test_get_metadata_schema_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_metadata_schema ] = mock_object @@ -13976,12 +13826,7 @@ async def test_list_metadata_schemas_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_metadata_schemas ] = mock_object @@ -14564,12 +14409,7 @@ async def test_query_artifact_lineage_subgraph_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_artifact_lineage_subgraph ] = mock_object @@ -15004,7 +14844,7 @@ def test_create_metadata_store_rest_required_fields( response = client.create_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15325,7 +15165,7 @@ def 
test_get_metadata_store_rest_required_fields( response = client.get_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15641,7 +15481,7 @@ def test_list_metadata_stores_rest_required_fields( response = client.list_metadata_stores(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16012,7 +15852,7 @@ def test_delete_metadata_store_rest_required_fields( response = client.delete_metadata_store(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16417,7 +16257,7 @@ def test_create_artifact_rest_required_fields( response = client.create_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16752,7 +16592,7 @@ def test_get_artifact_rest_required_fields( response = client.get_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17068,7 +16908,7 @@ def test_list_artifacts_rest_required_fields( response = client.list_artifacts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17546,7 +17386,7 @@ def test_update_artifact_rest_required_fields( response = client.update_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17867,7 +17707,7 @@ def 
test_delete_artifact_rest_required_fields( response = client.delete_artifact(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18177,7 +18017,7 @@ def test_purge_artifacts_rest_required_fields( response = client.purge_artifacts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18589,7 +18429,7 @@ def test_create_context_rest_required_fields( response = client.create_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18920,7 +18760,7 @@ def test_get_context_rest_required_fields( response = client.get_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19236,7 +19076,7 @@ def test_list_contexts_rest_required_fields( response = client.list_contexts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19711,7 +19551,7 @@ def test_update_context_rest_required_fields( response = client.update_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20035,7 +19875,7 @@ def test_delete_context_rest_required_fields( response = client.delete_context(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20353,7 +20193,7 @@ def test_purge_contexts_rest_required_fields( response 
= client.purge_contexts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20679,7 +20519,7 @@ def test_add_context_artifacts_and_executions_rest_required_fields( response = client.add_context_artifacts_and_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21005,7 +20845,7 @@ def test_add_context_children_rest_required_fields( response = client.add_context_children(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21321,7 +21161,7 @@ def test_remove_context_children_rest_required_fields( response = client.remove_context_children(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21636,7 +21476,7 @@ def test_query_context_lineage_subgraph_rest_required_fields( response = client.query_context_lineage_subgraph(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22045,7 +21885,7 @@ def test_create_execution_rest_required_fields( response = client.create_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22378,7 +22218,7 @@ def test_get_execution_rest_required_fields( response = client.get_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22694,7 
+22534,7 @@ def test_list_executions_rest_required_fields( response = client.list_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23171,7 +23011,7 @@ def test_update_execution_rest_required_fields( response = client.update_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23494,7 +23334,7 @@ def test_delete_execution_rest_required_fields( response = client.delete_execution(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23806,7 +23646,7 @@ def test_purge_executions_rest_required_fields( response = client.purge_executions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24125,7 +23965,7 @@ def test_add_execution_events_rest_required_fields( response = client.add_execution_events(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24438,7 +24278,7 @@ def test_query_execution_inputs_and_outputs_rest_required_fields( response = client.query_execution_inputs_and_outputs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24850,7 +24690,7 @@ def test_create_metadata_schema_rest_required_fields( response = client.create_metadata_schema(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert 
expected_params == actual_params @@ -25186,7 +25026,7 @@ def test_get_metadata_schema_rest_required_fields( response = client.get_metadata_schema(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25508,7 +25348,7 @@ def test_list_metadata_schemas_rest_required_fields( response = client.list_metadata_schemas(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25896,7 +25736,7 @@ def test_query_artifact_lineage_subgraph_rest_required_fields( response = client.query_artifact_lineage_subgraph(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 54fc23d834..e651d6ef7b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1344,12 +1344,7 @@ async def test_search_migratable_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_migratable_resources ] = mock_object @@ -1938,12 +1933,7 @@ async def test_batch_migrate_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.batch_migrate_resources ] = mock_object @@ -2356,7 +2346,7 @@ def test_search_migratable_resources_rest_required_fields( response = client.search_migratable_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2726,7 +2716,7 @@ def test_batch_migrate_resources_rest_required_fields( response = client.batch_migrate_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3535,22 +3525,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -3560,19 +3547,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) 
assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py index 17d5e4add9..7fb6cf5dbd 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py @@ -1376,12 +1376,7 @@ async def test_get_publisher_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_publisher_model ] = mock_object @@ -1797,12 +1792,7 @@ async def test_list_publisher_models_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_publisher_models ] = mock_object @@ -2341,6 +2331,7 @@ def test_get_publisher_model_rest_required_fields( # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( ( + "is_hugging_face_model", "language_code", "view", ) @@ -2387,7 +2378,7 @@ def test_get_publisher_model_rest_required_fields( response = client.get_publisher_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -2401,6 +2392,7 @@ def test_get_publisher_model_rest_unset_required_fields(): assert set(unset_fields) == ( set( ( + "isHuggingFaceModel", "languageCode", "view", ) @@ -2714,7 +2706,7 @@ def test_list_publisher_models_rest_required_fields( response = client.list_publisher_models(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py index c5b19a30e3..32ae56caaf 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py @@ -1404,12 +1404,7 @@ async def test_create_model_monitor_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_model_monitor ] = mock_object @@ -1838,12 +1833,7 @@ async def test_update_model_monitor_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_model_monitor ] = 
mock_object @@ -2126,6 +2116,8 @@ def test_get_model_monitor(request_type, transport: str = "grpc"): call.return_value = model_monitor.ModelMonitor( name="name_value", display_name="display_name_value", + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_model_monitor(request) @@ -2139,6 +2131,8 @@ def test_get_model_monitor(request_type, transport: str = "grpc"): assert isinstance(response, model_monitor.ModelMonitor) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_model_monitor_empty_call(): @@ -2247,6 +2241,8 @@ async def test_get_model_monitor_empty_call_async(): model_monitor.ModelMonitor( name="name_value", display_name="display_name_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_model_monitor() @@ -2278,12 +2274,7 @@ async def test_get_model_monitor_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_monitor ] = mock_object @@ -2324,6 +2315,8 @@ async def test_get_model_monitor_async( model_monitor.ModelMonitor( name="name_value", display_name="display_name_value", + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_model_monitor(request) @@ -2338,6 +2331,8 @@ async def test_get_model_monitor_async( assert isinstance(response, model_monitor.ModelMonitor) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2676,12 +2671,7 @@ async def test_list_model_monitors_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_monitors ] = mock_object @@ -3265,12 +3255,7 @@ async def test_delete_model_monitor_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model_monitor ] = mock_object @@ -3670,12 +3655,7 @@ async def test_create_model_monitoring_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_model_monitoring_job ] = mock_object @@ -4096,12 +4076,7 @@ async def test_get_model_monitoring_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_monitoring_job ] = mock_object @@ -4499,12 +4474,7 @@ async def test_list_model_monitoring_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_monitoring_jobs ] = mock_object @@ -5093,12 +5063,7 @@ async def 
test_delete_model_monitoring_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model_monitoring_job ] = mock_object @@ -5489,12 +5454,7 @@ async def test_search_model_monitoring_stats_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_model_monitoring_stats ] = mock_object @@ -6104,12 +6064,7 @@ async def test_search_model_monitoring_alerts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.search_model_monitoring_alerts ] = mock_object @@ -6670,6 +6625,8 @@ def test_create_model_monitor_rest(request_type): }, "create_time": {}, "update_time": {}, + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -6873,7 +6830,7 @@ def test_create_model_monitor_rest_required_fields( response = client.create_model_monitor(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7197,6 +7154,8 @@ def test_update_model_monitor_rest(request_type): }, "create_time": {}, "update_time": {}, + "satisfies_pzs": True, + "satisfies_pzi": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -7395,7 +7354,7 @@ def test_update_model_monitor_rest_required_fields( response = client.update_model_monitor(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7607,6 +7566,8 @@ def test_get_model_monitor_rest(request_type): return_value = model_monitor.ModelMonitor( name="name_value", display_name="display_name_value", + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -7624,6 +7585,8 @@ def test_get_model_monitor_rest(request_type): assert isinstance(response, model_monitor.ModelMonitor) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_model_monitor_rest_use_cached_wrapped_rpc(): @@ -7733,7 +7696,7 @@ def test_get_model_monitor_rest_required_fields( response = client.get_model_monitor(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8056,7 +8019,7 @@ def test_list_model_monitors_rest_required_fields( response = 
client.list_model_monitors(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8434,7 +8397,7 @@ def test_delete_model_monitor_rest_required_fields( response = client.delete_model_monitor(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8964,7 +8927,7 @@ def test_create_model_monitoring_job_rest_required_fields( response = client.create_model_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9301,7 +9264,7 @@ def test_get_model_monitoring_job_rest_required_fields( response = client.get_model_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9630,7 +9593,7 @@ def test_list_model_monitoring_jobs_rest_required_fields( response = client.list_model_monitoring_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10019,7 +9982,7 @@ def test_delete_model_monitoring_job_rest_required_fields( response = client.delete_model_monitoring_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10343,7 +10306,7 @@ def test_search_model_monitoring_stats_rest_required_fields( response = client.search_model_monitoring_stats(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == 
actual_params @@ -10739,7 +10702,7 @@ def test_search_model_monitoring_alerts_rest_required_fields( response = client.search_model_monitoring_alerts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index 51f99001e7..1fa64b03d5 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -1297,12 +1297,7 @@ async def test_upload_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upload_model ] = mock_object @@ -1734,12 +1729,7 @@ async def test_get_model_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model ] = mock_object @@ -2145,12 +2135,7 @@ async def test_list_models_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_models ] = mock_object @@ -2722,12 +2707,7 @@ async def test_list_model_versions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_versions ] = mock_object @@ -3354,12 +3334,7 @@ async def test_update_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_model ] = mock_object @@ -3783,12 +3758,7 @@ async def test_update_explanation_dataset_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_explanation_dataset ] = mock_object @@ -4163,12 +4133,7 @@ async def test_delete_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model ] = mock_object @@ -4544,12 +4509,7 @@ async def test_delete_model_version_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_model_version ] = mock_object @@ -4997,12 +4957,7 @@ async def test_merge_version_aliases_async_use_cached_wrapped_rpc( ) # 
Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.merge_version_aliases ] = mock_object @@ -5424,12 +5379,7 @@ async def test_export_model_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_model ] = mock_object @@ -5819,12 +5769,7 @@ async def test_copy_model_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.copy_model ] = mock_object @@ -6221,12 +6166,7 @@ async def test_import_model_evaluation_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_model_evaluation ] = mock_object @@ -6634,12 +6574,7 @@ async def test_batch_import_model_evaluation_slices_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_import_model_evaluation_slices ] = mock_object 
@@ -7049,12 +6984,7 @@ async def test_batch_import_evaluated_annotations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_import_evaluated_annotations ] = mock_object @@ -7484,12 +7414,7 @@ async def test_get_model_evaluation_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_evaluation ] = mock_object @@ -7887,12 +7812,7 @@ async def test_list_model_evaluations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_evaluations ] = mock_object @@ -8481,12 +8401,7 @@ async def test_get_model_evaluation_slice_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_model_evaluation_slice ] = mock_object @@ -8880,12 +8795,7 @@ async def test_list_model_evaluation_slices_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_model_evaluation_slices ] = mock_object @@ -9441,7 +9351,7 @@ def test_upload_model_rest_required_fields( response = client.upload_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9790,7 +9700,7 @@ def test_get_model_rest_required_fields(request_type=model_service.GetModelReque response = client.get_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10096,7 +10006,7 @@ def test_list_models_rest_required_fields(request_type=model_service.ListModelsR response = client.list_models(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10478,7 +10388,7 @@ def test_list_model_versions_rest_required_fields( response = client.list_model_versions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11067,7 +10977,7 @@ def test_update_model_rest_required_fields( response = client.update_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11384,7 +11294,7 @@ def test_update_explanation_dataset_rest_required_fields( response = client.update_explanation_dataset(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11683,7 +11593,7 @@ def test_delete_model_rest_required_fields( response = client.delete_model(request) - 
expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11986,7 +11896,7 @@ def test_delete_model_version_rest_required_fields( response = client.delete_model_version(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12337,7 +12247,7 @@ def test_merge_version_aliases_rest_required_fields( response = client.merge_version_aliases(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12645,7 +12555,7 @@ def test_export_model_rest_required_fields( response = client.export_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12961,7 +12871,7 @@ def test_copy_model_rest_required_fields(request_type=model_service.CopyModelReq response = client.copy_model(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13284,7 +13194,7 @@ def test_import_model_evaluation_rest_required_fields( response = client.import_model_evaluation(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13611,7 +13521,7 @@ def test_batch_import_model_evaluation_slices_rest_required_fields( response = client.batch_import_model_evaluation_slices(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13955,7 +13865,7 @@ def 
test_batch_import_evaluated_annotations_rest_required_fields( response = client.batch_import_evaluated_annotations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14302,7 +14212,7 @@ def test_get_model_evaluation_rest_required_fields( response = client.get_model_evaluation(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -14623,7 +14533,7 @@ def test_list_model_evaluations_rest_required_fields( response = client.list_model_evaluations(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15002,7 +14912,7 @@ def test_get_model_evaluation_slice_rest_required_fields( response = client.get_model_evaluation_slice(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15327,7 +15237,7 @@ def test_list_model_evaluation_slices_rest_required_fields( response = client.list_model_evaluation_slices(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py index 86ea55606c..e59ed840ef 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py @@ -1357,12 +1357,7 @@ async def test_create_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_notebook_runtime_template ] = mock_object @@ -1803,12 +1798,7 @@ async def test_get_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_notebook_runtime_template ] = mock_object @@ -2219,12 +2209,7 @@ async def test_list_notebook_runtime_templates_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_notebook_runtime_templates ] = mock_object @@ -2813,12 +2798,7 @@ async def test_delete_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_notebook_runtime_template ] = mock_object @@ -3227,12 +3207,7 @@ async def test_update_notebook_runtime_template_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_notebook_runtime_template ] = mock_object @@ -3658,12 
+3633,7 @@ async def test_assign_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.assign_notebook_runtime ] = mock_object @@ -4125,12 +4095,7 @@ async def test_get_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_notebook_runtime ] = mock_object @@ -4555,12 +4520,7 @@ async def test_list_notebook_runtimes_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_notebook_runtimes ] = mock_object @@ -5145,12 +5105,7 @@ async def test_delete_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_notebook_runtime ] = mock_object @@ -5538,12 +5493,7 @@ async def test_upgrade_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.upgrade_notebook_runtime ] = mock_object @@ -5931,12 +5881,7 @@ async def test_start_notebook_runtime_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.start_notebook_runtime ] = mock_object @@ -6326,12 +6271,7 @@ async def test_create_notebook_execution_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_notebook_execution_job ] = mock_object @@ -6776,12 +6716,7 @@ async def test_get_notebook_execution_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_notebook_execution_job ] = mock_object @@ -7181,12 +7116,7 @@ async def test_list_notebook_execution_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_notebook_execution_jobs ] = mock_object @@ -7776,12 +7706,7 @@ async def test_delete_notebook_execution_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - 
def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_notebook_execution_job ] = mock_object @@ -8250,7 +8175,7 @@ def test_create_notebook_runtime_template_rest_required_fields( response = client.create_notebook_runtime_template(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8598,7 +8523,7 @@ def test_get_notebook_runtime_template_rest_required_fields( response = client.get_notebook_runtime_template(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8927,7 +8852,7 @@ def test_list_notebook_runtime_templates_rest_required_fields( response = client.list_notebook_runtime_templates(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9314,7 +9239,7 @@ def test_delete_notebook_runtime_template_rest_required_fields( response = client.delete_notebook_runtime_template(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9758,7 +9683,7 @@ def test_update_notebook_runtime_template_rest_required_fields( response = client.update_notebook_runtime_template(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10097,7 +10022,7 @@ def test_assign_notebook_runtime_rest_required_fields( response = client.assign_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10452,7 +10377,7 @@ def test_get_notebook_runtime_rest_required_fields( response = client.get_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10776,7 +10701,7 @@ def test_list_notebook_runtimes_rest_required_fields( response = client.list_notebook_runtimes(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11152,7 +11077,7 @@ def test_delete_notebook_runtime_rest_required_fields( response = client.delete_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11463,7 +11388,7 @@ def test_upgrade_notebook_runtime_rest_required_fields( response = client.upgrade_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11774,7 +11699,7 @@ def test_start_notebook_runtime_rest_required_fields( response = client.start_notebook_runtime(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12186,7 +12111,7 @@ def test_create_notebook_execution_job_rest_required_fields( response = client.create_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12530,7 +12455,7 @@ def test_get_notebook_execution_job_rest_required_fields( response = 
client.get_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -12857,7 +12782,7 @@ def test_list_notebook_execution_jobs_rest_required_fields( response = client.list_notebook_execution_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -13239,7 +13164,7 @@ def test_delete_notebook_execution_job_rest_required_fields( response = client.delete_notebook_execution_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py index b654b0e7c3..1bb0456b55 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py @@ -68,6 +68,7 @@ persistent_resource as gca_persistent_resource, ) from google.cloud.aiplatform_v1beta1.types import persistent_resource_service +from google.cloud.aiplatform_v1beta1.types import service_networking from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore @@ -1409,12 +1410,7 @@ async def test_create_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_persistent_resource ] = mock_object @@ -1843,12 +1839,7 @@ 
async def test_get_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_persistent_resource ] = mock_object @@ -2246,12 +2237,7 @@ async def test_list_persistent_resources_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_persistent_resources ] = mock_object @@ -2846,12 +2832,7 @@ async def test_delete_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_persistent_resource ] = mock_object @@ -3235,12 +3216,7 @@ async def test_update_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_persistent_resource ] = mock_object @@ -3646,12 +3622,7 @@ async def test_reboot_persistent_resource_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.reboot_persistent_resource ] = mock_object @@ -3918,6 +3889,7 @@ def test_create_persistent_resource_rest(request_type): "update_time": {}, "labels": {}, "network": "network_value", + "psc_interface_config": {"network_attachment": "network_attachment_value"}, "encryption_spec": {"kms_key_name": "kms_key_name_value"}, "resource_runtime_spec": { "service_account_spec": { @@ -3929,6 +3901,7 @@ def test_create_persistent_resource_rest(request_type): "resource_pool_images": {}, "head_node_resource_pool_id": "head_node_resource_pool_id_value", "ray_metric_spec": {"disabled": True}, + "ray_logs_spec": {"disabled": True}, }, }, "resource_runtime": { @@ -4160,6 +4133,7 @@ def test_create_persistent_resource_rest_required_fields( "persistentResourceId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4498,7 +4472,7 @@ def test_get_persistent_resource_rest_required_fields( response = client.get_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4826,7 +4800,7 @@ def test_list_persistent_resources_rest_required_fields( response = client.list_persistent_resources(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5209,7 +5183,7 @@ def test_delete_persistent_resource_rest_required_fields( response = client.delete_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5433,6 +5407,7 @@ def test_update_persistent_resource_rest(request_type): "update_time": {}, "labels": {}, "network": "network_value", + 
"psc_interface_config": {"network_attachment": "network_attachment_value"}, "encryption_spec": {"kms_key_name": "kms_key_name_value"}, "resource_runtime_spec": { "service_account_spec": { @@ -5444,6 +5419,7 @@ def test_update_persistent_resource_rest(request_type): "resource_pool_images": {}, "head_node_resource_pool_id": "head_node_resource_pool_id_value", "ray_metric_spec": {"disabled": True}, + "ray_logs_spec": {"disabled": True}, }, }, "resource_runtime": { @@ -5655,7 +5631,7 @@ def test_update_persistent_resource_rest_required_fields( response = client.update_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5987,7 +5963,7 @@ def test_reboot_persistent_resource_rest_required_fields( response = client.reboot_persistent_resource(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6794,10 +6770,38 @@ def test_parse_network_path(): assert expected == actual -def test_notebook_runtime_template_path(): +def test_network_attachment_path(): project = "oyster" - location = "nudibranch" - notebook_runtime_template = "cuttlefish" + region = "nudibranch" + networkattachment = "cuttlefish" + expected = "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format( + project=project, + region=region, + networkattachment=networkattachment, + ) + actual = PersistentResourceServiceClient.network_attachment_path( + project, region, networkattachment + ) + assert expected == actual + + +def test_parse_network_attachment_path(): + expected = { + "project": "mussel", + "region": "winkle", + "networkattachment": "nautilus", + } + path = PersistentResourceServiceClient.network_attachment_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PersistentResourceServiceClient.parse_network_attachment_path(path) + assert expected == actual + + +def test_notebook_runtime_template_path(): + project = "scallop" + location = "abalone" + notebook_runtime_template = "squid" expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( project=project, location=location, @@ -6811,9 +6815,9 @@ def test_notebook_runtime_template_path(): def test_parse_notebook_runtime_template_path(): expected = { - "project": "mussel", - "location": "winkle", - "notebook_runtime_template": "nautilus", + "project": "clam", + "location": "whelk", + "notebook_runtime_template": "octopus", } path = PersistentResourceServiceClient.notebook_runtime_template_path(**expected) @@ -6823,9 +6827,9 @@ def test_parse_notebook_runtime_template_path(): def test_persistent_resource_path(): - project = "scallop" - location = "abalone" - persistent_resource = "squid" + project = "oyster" + location = "nudibranch" + persistent_resource = "cuttlefish" expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format( project=project, location=location, @@ -6839,9 +6843,9 @@ def test_persistent_resource_path(): def test_parse_persistent_resource_path(): expected = { - "project": "clam", - "location": "whelk", - "persistent_resource": "octopus", + "project": "mussel", + "location": "winkle", + "persistent_resource": "nautilus", } path = PersistentResourceServiceClient.persistent_resource_path(**expected) @@ -6851,7 +6855,7 @@ def test_parse_persistent_resource_path(): def test_common_billing_account_path(): - billing_account = "oyster" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6863,7 +6867,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nudibranch", + "billing_account": "abalone", } path = 
PersistentResourceServiceClient.common_billing_account_path(**expected) @@ -6873,7 +6877,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "cuttlefish" + folder = "squid" expected = "folders/{folder}".format( folder=folder, ) @@ -6883,7 +6887,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "mussel", + "folder": "clam", } path = PersistentResourceServiceClient.common_folder_path(**expected) @@ -6893,7 +6897,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "winkle" + organization = "whelk" expected = "organizations/{organization}".format( organization=organization, ) @@ -6903,7 +6907,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nautilus", + "organization": "octopus", } path = PersistentResourceServiceClient.common_organization_path(**expected) @@ -6913,7 +6917,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "scallop" + project = "oyster" expected = "projects/{project}".format( project=project, ) @@ -6923,7 +6927,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "abalone", + "project": "nudibranch", } path = PersistentResourceServiceClient.common_project_path(**expected) @@ -6933,8 +6937,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "squid" - location = "clam" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6945,8 +6949,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "whelk", - "location": "octopus", + "project": "winkle", + "location": "nautilus", } path = PersistentResourceServiceClient.common_location_path(**expected) diff --git 
a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index fffef3190e..1efabf73c9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -1375,12 +1375,7 @@ async def test_create_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_training_pipeline ] = mock_object @@ -1803,12 +1798,7 @@ async def test_get_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_training_pipeline ] = mock_object @@ -2210,12 +2200,7 @@ async def test_list_training_pipelines_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_training_pipelines ] = mock_object @@ -2800,12 +2785,7 @@ async def test_delete_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_training_pipeline ] = mock_object @@ -3187,12 +3167,7 @@ 
async def test_cancel_training_pipeline_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_training_pipeline ] = mock_object @@ -3426,6 +3401,8 @@ def test_create_pipeline_job(request_type, transport: str = "grpc"): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.create_pipeline_job(request) @@ -3446,6 +3423,8 @@ def test_create_pipeline_job(request_type, transport: str = "grpc"): assert response.template_uri == "template_uri_value" assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_create_pipeline_job_empty_call(): @@ -3565,6 +3544,8 @@ async def test_create_pipeline_job_empty_call_async(): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.create_pipeline_job() @@ -3596,12 +3577,7 @@ async def test_create_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_pipeline_job ] = mock_object @@ -3649,6 +3625,8 @@ async def test_create_pipeline_job_async( template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.create_pipeline_job(request) @@ 
-3670,6 +3648,8 @@ async def test_create_pipeline_job_async( assert response.template_uri == "template_uri_value" assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -3878,6 +3858,8 @@ def test_get_pipeline_job(request_type, transport: str = "grpc"): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_pipeline_job(request) @@ -3898,6 +3880,8 @@ def test_get_pipeline_job(request_type, transport: str = "grpc"): assert response.template_uri == "template_uri_value" assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_pipeline_job_empty_call(): @@ -4007,6 +3991,8 @@ async def test_get_pipeline_job_empty_call_async(): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_pipeline_job() @@ -4038,12 +4024,7 @@ async def test_get_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_pipeline_job ] = mock_object @@ -4088,6 +4069,8 @@ async def test_get_pipeline_job_async( template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_pipeline_job(request) @@ -4109,6 +4092,8 @@ async def test_get_pipeline_job_async( assert response.template_uri == "template_uri_value" 
assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -4441,12 +4426,7 @@ async def test_list_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_pipeline_jobs ] = mock_object @@ -5030,12 +5010,7 @@ async def test_delete_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_pipeline_job ] = mock_object @@ -5423,12 +5398,7 @@ async def test_batch_delete_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_delete_pipeline_jobs ] = mock_object @@ -5819,12 +5789,7 @@ async def test_cancel_pipeline_job_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.cancel_pipeline_job ] = mock_object @@ -6202,12 +6167,7 @@ async def test_batch_cancel_pipeline_jobs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock 
- class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_cancel_pipeline_jobs ] = mock_object @@ -6832,7 +6792,7 @@ def test_create_training_pipeline_rest_required_fields( response = client.create_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7162,7 +7122,7 @@ def test_get_training_pipeline_rest_required_fields( response = client.get_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7485,7 +7445,7 @@ def test_list_training_pipelines_rest_required_fields( response = client.list_training_pipelines(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7860,7 +7820,7 @@ def test_delete_training_pipeline_rest_required_fields( response = client.delete_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8167,7 +8127,7 @@ def test_cancel_training_pipeline_rest_required_fields( response = client.cancel_training_pipeline(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8436,6 +8396,8 @@ def test_create_pipeline_job_rest(request_type): "template_metadata": {"version": "version_value"}, "schedule_name": "schedule_name_value", "preflight_validations": True, + "satisfies_pzs": True, + "satisfies_pzi": 
True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -8519,6 +8481,8 @@ def get_message_fields(field): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -8543,6 +8507,8 @@ def get_message_fields(field): assert response.template_uri == "template_uri_value" assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_create_pipeline_job_rest_use_cached_wrapped_rpc(): @@ -8657,7 +8623,7 @@ def test_create_pipeline_job_rest_required_fields( response = client.create_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8857,6 +8823,8 @@ def test_get_pipeline_job_rest(request_type): template_uri="template_uri_value", schedule_name="schedule_name_value", preflight_validations=True, + satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj @@ -8881,6 +8849,8 @@ def test_get_pipeline_job_rest(request_type): assert response.template_uri == "template_uri_value" assert response.schedule_name == "schedule_name_value" assert response.preflight_validations is True + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_pipeline_job_rest_use_cached_wrapped_rpc(): @@ -8990,7 +8960,7 @@ def test_get_pipeline_job_rest_required_fields( response = client.get_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9309,7 +9279,7 @@ def 
test_list_pipeline_jobs_rest_required_fields( response = client.list_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9680,7 +9650,7 @@ def test_delete_pipeline_job_rest_required_fields( response = client.delete_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9991,7 +9961,7 @@ def test_batch_delete_pipeline_jobs_rest_required_fields( response = client.batch_delete_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10302,7 +10272,7 @@ def test_cancel_pipeline_job_rest_required_fields( response = client.cancel_pipeline_job(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10603,7 +10573,7 @@ def test_batch_cancel_pipeline_jobs_rest_required_fields( response = client.batch_cancel_pipeline_jobs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index 2d84c0a0d4..2b20e07918 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -1349,12 +1349,7 @@ async def test_predict_async_use_cached_wrapped_rpc(transport: str = "grpc_async ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return 
iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.predict ] = mock_object @@ -1678,12 +1673,7 @@ async def test_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.raw_predict ] = mock_object @@ -2066,12 +2056,7 @@ async def test_stream_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_raw_predict ] = mock_object @@ -2445,12 +2430,7 @@ async def test_direct_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.direct_predict ] = mock_object @@ -2744,12 +2724,7 @@ async def test_direct_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.direct_raw_predict ] = mock_object @@ -2973,12 +2948,7 @@ async def test_stream_direct_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - 
self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_direct_predict ] = mock_object @@ -3136,12 +3106,7 @@ async def test_stream_direct_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_direct_raw_predict ] = mock_object @@ -3296,12 +3261,7 @@ async def test_streaming_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_predict ] = mock_object @@ -3534,12 +3494,7 @@ async def test_server_streaming_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.server_streaming_predict ] = mock_object @@ -3763,12 +3718,7 @@ async def test_streaming_raw_predict_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.streaming_raw_predict ] = mock_object @@ -3991,12 +3941,7 @@ async def test_explain_async_use_cached_wrapped_rpc(transport: str = "grpc_async ) # Replace 
cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.explain ] = mock_object @@ -4318,12 +4263,7 @@ async def test_count_tokens_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.count_tokens ] = mock_object @@ -4698,12 +4638,7 @@ async def test_generate_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.generate_content ] = mock_object @@ -5087,12 +5022,7 @@ async def test_stream_generate_content_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stream_generate_content ] = mock_object @@ -5472,12 +5402,7 @@ async def test_chat_completions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.chat_completions ] = mock_object @@ -5832,7 +5757,7 @@ def 
test_predict_rest_required_fields(request_type=prediction_service.PredictReq response = client.predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6148,7 +6073,7 @@ def test_raw_predict_rest_required_fields( response = client.raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6464,7 +6389,7 @@ def test_stream_raw_predict_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.stream_raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6769,7 +6694,7 @@ def test_direct_predict_rest_required_fields( response = client.direct_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7020,7 +6945,7 @@ def test_direct_raw_predict_rest_required_fields( response = client.direct_raw_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7312,7 +7237,7 @@ def test_server_streaming_predict_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.server_streaming_predict(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7569,7 +7494,7 @@ def test_explain_rest_required_fields(request_type=prediction_service.ExplainReq response = client.explain(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7828,7 +7753,6 @@ def test_count_tokens_rest_required_fields( request_init = {} request_init["endpoint"] = "" - request_init["model"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7845,7 +7769,6 @@ def test_count_tokens_rest_required_fields( # verify required fields with default values are now present jsonified_request["endpoint"] = "endpoint_value" - jsonified_request["model"] = "model_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() @@ -7855,8 +7778,6 @@ def test_count_tokens_rest_required_fields( # verify required fields with non-default values are left alone assert "endpoint" in jsonified_request assert jsonified_request["endpoint"] == "endpoint_value" - assert "model" in jsonified_request - assert jsonified_request["model"] == "model_value" client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7895,7 +7816,7 @@ def test_count_tokens_rest_required_fields( response = client.count_tokens(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7906,17 +7827,7 @@ def test_count_tokens_rest_unset_required_fields(): ) unset_fields = transport.count_tokens._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "endpoint", - "model", - "instances", - "contents", - ) - ) - ) + assert set(unset_fields) == (set(()) & set(("endpoint",))) @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -8212,7 +8123,7 @@ def test_generate_content_rest_required_fields( response = client.generate_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == 
actual_params @@ -8540,7 +8451,7 @@ def test_stream_generate_content_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.stream_generate_content(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8947,7 +8858,7 @@ def test_chat_completions_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.chat_completions(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py index 71b781b7c7..c7265fd470 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py @@ -1416,12 +1416,7 @@ async def test_query_reasoning_engine_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.query_reasoning_engine ] = mock_object @@ -1707,7 +1702,7 @@ def test_query_reasoning_engine_rest_required_fields( response = client.query_reasoning_engine(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py index 74e92a6569..0e904a65eb 100644 --- 
a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py @@ -1389,12 +1389,7 @@ async def test_create_reasoning_engine_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_reasoning_engine ] = mock_object @@ -1801,12 +1796,7 @@ async def test_get_reasoning_engine_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_reasoning_engine ] = mock_object @@ -2204,12 +2194,7 @@ async def test_list_reasoning_engines_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_reasoning_engines ] = mock_object @@ -2790,12 +2775,7 @@ async def test_update_reasoning_engine_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_reasoning_engine ] = mock_object @@ -3193,12 +3173,7 @@ async def test_delete_reasoning_engine_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_reasoning_engine ] = mock_object @@ -3643,7 +3618,7 @@ def test_create_reasoning_engine_rest_required_fields( response = client.create_reasoning_engine(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3969,7 +3944,7 @@ def test_get_reasoning_engine_rest_required_fields( response = client.get_reasoning_engine(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4294,7 +4269,7 @@ def test_list_reasoning_engines_rest_required_fields( response = client.list_reasoning_engines(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4758,7 +4733,7 @@ def test_update_reasoning_engine_rest_required_fields( response = client.update_reasoning_engine(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5083,7 +5058,7 @@ def test_delete_reasoning_engine_rest_required_fields( response = client.delete_reasoning_engine(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py index 1f7e9b05fc..a3089eb8f7 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py +++ 
b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py @@ -1374,12 +1374,7 @@ async def test_create_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_schedule ] = mock_object @@ -1766,12 +1761,7 @@ async def test_delete_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_schedule ] = mock_object @@ -2158,12 +2148,7 @@ async def test_get_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_schedule ] = mock_object @@ -2543,12 +2528,7 @@ async def test_list_schedules_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_schedules ] = mock_object @@ -3095,12 +3075,7 @@ async def test_pause_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.pause_schedule ] = mock_object @@ -3448,12 +3423,7 @@ async def test_resume_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.resume_schedule ] = mock_object @@ -3836,12 +3806,7 @@ async def test_update_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_schedule ] = mock_object @@ -4192,6 +4157,8 @@ def test_create_schedule_rest(request_type): "template_metadata": {"version": "version_value"}, "schedule_name": "schedule_name_value", "preflight_validations": True, + "satisfies_pzs": True, + "satisfies_pzi": True, }, "pipeline_job_id": "pipeline_job_id_value", }, @@ -4599,7 +4566,7 @@ def test_create_schedule_rest_required_fields( response = client.create_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4908,7 +4875,7 @@ def test_delete_schedule_rest_required_fields( response = client.delete_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5228,7 +5195,7 @@ def test_get_schedule_rest_required_fields( response = client.get_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert 
expected_params == actual_params @@ -5540,7 +5507,7 @@ def test_list_schedules_rest_required_fields( response = client.list_schedules(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5903,7 +5870,7 @@ def test_pause_schedule_rest_required_fields( response = client.pause_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6191,7 +6158,7 @@ def test_resume_schedule_rest_required_fields( response = client.resume_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6472,6 +6439,8 @@ def test_update_schedule_rest(request_type): "template_metadata": {"version": "version_value"}, "schedule_name": "schedule_name_value", "preflight_validations": True, + "satisfies_pzs": True, + "satisfies_pzi": True, }, "pipeline_job_id": "pipeline_job_id_value", }, @@ -6876,7 +6845,7 @@ def test_update_schedule_rest_required_fields( response = client.update_schedule(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index d11905a6ea..af928e0092 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -1383,12 +1383,7 @@ async def test_create_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_specialist_pool ] = mock_object @@ -1801,12 +1796,7 @@ async def test_get_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_specialist_pool ] = mock_object @@ -2206,12 +2196,7 @@ async def test_list_specialist_pools_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_specialist_pools ] = mock_object @@ -2796,12 +2781,7 @@ async def test_delete_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_specialist_pool ] = mock_object @@ -3185,12 +3165,7 @@ async def test_update_specialist_pool_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_specialist_pool ] = mock_object @@ -3645,7 +3620,7 @@ def test_create_specialist_pool_rest_required_fields( response = client.create_specialist_pool(request) - expected_params = 
[] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -3975,7 +3950,7 @@ def test_get_specialist_pool_rest_required_fields( response = client.get_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4300,7 +4275,7 @@ def test_list_specialist_pools_rest_required_fields( response = client.list_specialist_pools(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -4680,7 +4655,7 @@ def test_delete_specialist_pool_rest_required_fields( response = client.delete_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5077,7 +5052,7 @@ def test_update_specialist_pool_rest_required_fields( response = client.update_specialist_pool(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 39cce61349..94f12b554a 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -1367,12 +1367,7 @@ async def test_create_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.create_tensorboard ] = mock_object @@ -1782,12 +1777,7 @@ async def test_get_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard ] = mock_object @@ -2175,12 +2165,7 @@ async def test_update_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard ] = mock_object @@ -2582,12 +2567,7 @@ async def test_list_tensorboards_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboards ] = mock_object @@ -3171,12 +3151,7 @@ async def test_delete_tensorboard_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard ] = mock_object @@ -3560,12 +3535,7 @@ async def test_read_tensorboard_usage_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_usage ] = mock_object @@ -3950,12 +3920,7 @@ async def test_read_tensorboard_size_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_size ] = mock_object @@ -4357,12 +4322,7 @@ async def test_create_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_experiment ] = mock_object @@ -4798,12 +4758,7 @@ async def test_get_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_experiment ] = mock_object @@ -5207,12 +5162,7 @@ async def test_update_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_experiment ] = mock_object @@ -5632,12 +5582,7 @@ async def test_list_tensorboard_experiments_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def 
__await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_experiments ] = mock_object @@ -6227,12 +6172,7 @@ async def test_delete_tensorboard_experiment_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_experiment ] = mock_object @@ -6632,12 +6572,7 @@ async def test_create_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_run ] = mock_object @@ -7046,12 +6981,7 @@ async def test_batch_create_tensorboard_runs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_tensorboard_runs ] = mock_object @@ -7466,12 +7396,7 @@ async def test_get_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_run ] = mock_object @@ -7870,12 +7795,7 @@ async def 
test_update_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_run ] = mock_object @@ -8285,12 +8205,7 @@ async def test_list_tensorboard_runs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_runs ] = mock_object @@ -8875,12 +8790,7 @@ async def test_delete_tensorboard_run_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_run ] = mock_object @@ -9268,12 +9178,7 @@ async def test_batch_create_tensorboard_time_series_async_use_cached_wrapped_rpc ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_tensorboard_time_series ] = mock_object @@ -9723,12 +9628,7 @@ async def test_create_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() 
client._client._transport._wrapped_methods[ client._client._transport.create_tensorboard_time_series ] = mock_object @@ -10170,12 +10070,7 @@ async def test_get_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_tensorboard_time_series ] = mock_object @@ -10595,12 +10490,7 @@ async def test_update_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_tensorboard_time_series ] = mock_object @@ -11027,12 +10917,7 @@ async def test_list_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tensorboard_time_series ] = mock_object @@ -11623,12 +11508,7 @@ async def test_delete_tensorboard_time_series_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_tensorboard_time_series ] = mock_object @@ -12020,12 +11900,7 @@ async def test_batch_read_tensorboard_time_series_data_async_use_cached_wrapped_ ) # Replace cached wrapped function with mock 
- class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.batch_read_tensorboard_time_series_data ] = mock_object @@ -12417,12 +12292,7 @@ async def test_read_tensorboard_time_series_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_time_series_data ] = mock_object @@ -12808,12 +12678,7 @@ async def test_read_tensorboard_blob_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_tensorboard_blob_data ] = mock_object @@ -13202,12 +13067,7 @@ async def test_write_tensorboard_experiment_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.write_tensorboard_experiment_data ] = mock_object @@ -13623,12 +13483,7 @@ async def test_write_tensorboard_run_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ 
client._client._transport.write_tensorboard_run_data ] = mock_object @@ -14053,12 +13908,7 @@ async def test_export_tensorboard_time_series_data_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.export_tensorboard_time_series_data ] = mock_object @@ -14705,7 +14555,7 @@ def test_create_tensorboard_rest_required_fields( response = client.create_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15034,7 +14884,7 @@ def test_get_tensorboard_rest_required_fields( response = client.get_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15423,7 +15273,7 @@ def test_update_tensorboard_rest_required_fields( response = client.update_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -15756,7 +15606,7 @@ def test_list_tensorboards_rest_required_fields( response = client.list_tensorboards(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16129,7 +15979,7 @@ def test_delete_tensorboard_rest_required_fields( response = client.delete_tensorboard(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16440,7 +16290,7 @@ def 
test_read_tensorboard_usage_rest_required_fields( response = client.read_tensorboard_usage(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -16759,7 +16609,7 @@ def test_read_tensorboard_size_rest_required_fields( response = client.read_tensorboard_size(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17185,6 +17035,7 @@ def test_create_tensorboard_experiment_rest_required_fields( "tensorboardExperimentId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17528,7 +17379,7 @@ def test_get_tensorboard_experiment_rest_required_fields( response = client.get_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -17936,7 +17787,7 @@ def test_update_tensorboard_experiment_rest_required_fields( response = client.update_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18287,7 +18138,7 @@ def test_list_tensorboard_experiments_rest_required_fields( response = client.list_tensorboard_experiments(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -18674,7 +18525,7 @@ def test_delete_tensorboard_experiment_rest_required_fields( response = client.delete_tensorboard_experiment(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] 
assert expected_params == actual_params @@ -19092,6 +18943,7 @@ def test_create_tensorboard_run_rest_required_fields( "tensorboardRunId", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19422,7 +19274,7 @@ def test_batch_create_tensorboard_runs_rest_required_fields( response = client.batch_create_tensorboard_runs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -19764,7 +19616,7 @@ def test_get_tensorboard_run_rest_required_fields( response = client.get_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20162,7 +20014,7 @@ def test_update_tensorboard_run_rest_required_fields( response = client.update_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20503,7 +20355,7 @@ def test_list_tensorboard_runs_rest_required_fields( response = client.list_tensorboard_runs(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -20885,7 +20737,7 @@ def test_delete_tensorboard_run_rest_required_fields( response = client.delete_tensorboard_run(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21206,7 +21058,7 @@ def test_batch_create_tensorboard_time_series_rest_required_fields( response = client.batch_create_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] 
actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -21660,7 +21512,7 @@ def test_create_tensorboard_time_series_rest_required_fields( response = client.create_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22014,7 +21866,7 @@ def test_get_tensorboard_time_series_rest_required_fields( response = client.get_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22437,7 +22289,7 @@ def test_update_tensorboard_time_series_rest_required_fields( response = client.update_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -22792,7 +22644,7 @@ def test_list_tensorboard_time_series_rest_required_fields( response = client.list_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23182,7 +23034,7 @@ def test_delete_tensorboard_time_series_rest_required_fields( response = client.delete_tensorboard_time_series(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23526,6 +23378,7 @@ def test_batch_read_tensorboard_time_series_data_rest_required_fields( "timeSeries", "", ), + ("$alt", "json;enum-encoding=int"), ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -23873,7 +23726,7 @@ def test_read_tensorboard_time_series_data_rest_required_fields( response = 
client.read_tensorboard_time_series_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24219,7 +24072,7 @@ def test_read_tensorboard_blob_data_rest_required_fields( iter_content.return_value = iter(json_return_value) response = client.read_tensorboard_blob_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24548,7 +24401,7 @@ def test_write_tensorboard_experiment_data_rest_required_fields( response = client.write_tensorboard_experiment_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -24893,7 +24746,7 @@ def test_write_tensorboard_run_data_rest_required_fields( response = client.write_tensorboard_run_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -25237,7 +25090,7 @@ def test_export_tensorboard_time_series_data_rest_required_fields( response = client.export_tensorboard_time_series_data(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py index 21611f7c90..ce5ff3f467 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -57,6 +57,7 @@ ) from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers from 
google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import transports +from google.cloud.aiplatform_v1beta1.types import api_auth from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import vertex_rag_data @@ -1379,12 +1380,7 @@ async def test_create_rag_corpus_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_rag_corpus ] = mock_object @@ -1776,12 +1772,7 @@ async def test_get_rag_corpus_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_rag_corpus ] = mock_object @@ -2154,12 +2145,7 @@ async def test_list_rag_corpora_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_rag_corpora ] = mock_object @@ -2723,12 +2709,7 @@ async def test_delete_rag_corpus_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_rag_corpus ] = mock_object @@ -3099,12 +3080,7 @@ async def 
test_upload_rag_file_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.upload_rag_file ] = mock_object @@ -3519,12 +3495,7 @@ async def test_import_rag_files_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.import_rag_files ] = mock_object @@ -3926,12 +3897,7 @@ async def test_get_rag_file_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_rag_file ] = mock_object @@ -4308,12 +4274,7 @@ async def test_list_rag_files_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_rag_files ] = mock_object @@ -4867,12 +4828,7 @@ async def test_delete_rag_file_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_rag_file ] = mock_object @@ -5301,7 +5257,7 @@ 
def test_create_rag_corpus_rest_required_fields( response = client.create_rag_corpus(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5618,7 +5574,7 @@ def test_get_rag_corpus_rest_required_fields( response = client.get_rag_corpus(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -5934,7 +5890,7 @@ def test_list_rag_corpora_rest_required_fields( response = client.list_rag_corpora(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6304,7 +6260,7 @@ def test_delete_rag_corpus_rest_required_fields( response = client.delete_rag_corpus(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6609,7 +6565,7 @@ def test_upload_rag_file_rest_required_fields( response = client.upload_rag_file(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -6940,7 +6896,7 @@ def test_import_rag_files_rest_required_fields( response = client.import_rag_files(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7271,7 +7227,7 @@ def test_get_rag_file_rest_required_fields( response = client.get_rag_file(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7585,7 +7541,7 @@ def 
test_list_rag_files_rest_required_fields( response = client.list_rag_files(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7957,7 +7913,7 @@ def test_delete_rag_file_rest_required_fields( response = client.delete_rag_file(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8859,8 +8815,36 @@ def test_parse_rag_file_path(): assert expected == actual +def test_secret_version_path(): + project = "whelk" + secret = "octopus" + secret_version = "oyster" + expected = "projects/{project}/secrets/{secret}/versions/{secret_version}".format( + project=project, + secret=secret, + secret_version=secret_version, + ) + actual = VertexRagDataServiceClient.secret_version_path( + project, secret, secret_version + ) + assert expected == actual + + +def test_parse_secret_version_path(): + expected = { + "project": "nudibranch", + "secret": "cuttlefish", + "secret_version": "mussel", + } + path = VertexRagDataServiceClient.secret_version_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VertexRagDataServiceClient.parse_secret_version_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "winkle" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -8870,7 +8854,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "nautilus", } path = VertexRagDataServiceClient.common_billing_account_path(**expected) @@ -8880,7 +8864,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "scallop" expected = "folders/{folder}".format( folder=folder, ) @@ -8890,7 +8874,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "abalone", } path = VertexRagDataServiceClient.common_folder_path(**expected) @@ -8900,7 +8884,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "squid" expected = "organizations/{organization}".format( organization=organization, ) @@ -8910,7 +8894,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "clam", } path = VertexRagDataServiceClient.common_organization_path(**expected) @@ -8920,7 +8904,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "whelk" expected = "projects/{project}".format( project=project, ) @@ -8930,7 +8914,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "octopus", } path = VertexRagDataServiceClient.common_project_path(**expected) @@ -8940,8 +8924,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + 
project = "oyster" + location = "nudibranch" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -8952,8 +8936,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": "clam", + "project": "cuttlefish", + "location": "mussel", } path = VertexRagDataServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py index c9a0695c23..9f678ff623 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py @@ -1326,12 +1326,7 @@ async def test_retrieve_contexts_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.retrieve_contexts ] = mock_object @@ -1694,7 +1689,7 @@ def test_retrieve_contexts_rest_required_fields( response = client.retrieve_contexts(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index 31f0f2dd3b..746c254b2d 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -1305,12 +1305,7 @@ async def test_create_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_study ] = mock_object @@ -1693,12 +1688,7 @@ async def test_get_study_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_study ] = mock_object @@ -2066,12 +2056,7 @@ async def test_list_studies_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_studies ] = mock_object @@ -2618,12 +2603,7 @@ async def test_delete_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_study ] = mock_object @@ -2989,12 +2969,7 @@ async def test_lookup_study_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.lookup_study ] = mock_object @@ -3361,12 +3336,7 @@ async def test_suggest_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + 
mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.suggest_trials ] = mock_object @@ -3664,12 +3634,7 @@ async def test_create_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_trial ] = mock_object @@ -4062,12 +4027,7 @@ async def test_get_trial_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_trial ] = mock_object @@ -4439,12 +4399,7 @@ async def test_list_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_trials ] = mock_object @@ -5026,12 +4981,7 @@ async def test_add_trial_measurement_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.add_trial_measurement ] = mock_object @@ -5345,12 +5295,7 @@ async def test_complete_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - 
mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.complete_trial ] = mock_object @@ -5633,12 +5578,7 @@ async def test_delete_trial_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_trial ] = mock_object @@ -6005,12 +5945,7 @@ async def test_check_trial_early_stopping_state_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.check_trial_early_stopping_state ] = mock_object @@ -6313,12 +6248,7 @@ async def test_stop_trial_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.stop_trial ] = mock_object @@ -6615,12 +6545,7 @@ async def test_list_optimal_trials_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_optimal_trials ] = mock_object @@ -7139,7 +7064,7 @@ def test_create_study_rest_required_fields( response = client.create_study(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7454,7 +7379,7 @@ def test_get_study_rest_required_fields(request_type=vizier_service.GetStudyRequ response = client.get_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -7760,7 +7685,7 @@ def test_list_studies_rest_required_fields( response = client.list_studies(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8120,7 +8045,7 @@ def test_delete_study_rest_required_fields( response = client.delete_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8424,7 +8349,7 @@ def test_lookup_study_rest_required_fields( response = client.lookup_study(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -8738,7 +8663,7 @@ def test_suggest_trials_rest_required_fields( response = client.suggest_trials(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9103,7 +9028,7 @@ def test_create_trial_rest_required_fields( response = client.create_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9426,7 +9351,7 @@ def test_get_trial_rest_required_fields(request_type=vizier_service.GetTrialRequ response = client.get_trial(request) - expected_params = [] + expected_params = [("$alt", 
"json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -9736,7 +9661,7 @@ def test_list_trials_rest_required_fields( response = client.list_trials(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10124,7 +10049,7 @@ def test_add_trial_measurement_rest_required_fields( response = client.add_trial_measurement(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10391,7 +10316,7 @@ def test_complete_trial_rest_required_fields( response = client.complete_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10631,7 +10556,7 @@ def test_delete_trial_rest_required_fields( response = client.delete_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -10932,7 +10857,7 @@ def test_check_trial_early_stopping_state_rest_required_fields( response = client.check_trial_early_stopping_state(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11196,7 +11121,7 @@ def test_stop_trial_rest_required_fields(request_type=vizier_service.StopTrialRe response = client.stop_trial(request) - expected_params = [] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params @@ -11444,7 +11369,7 @@ def test_list_optimal_trials_rest_required_fields( response = client.list_optimal_trials(request) - expected_params = 
[] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params diff --git a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py index eeb7945167..307ecd045f 100644 --- a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py +++ b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py @@ -38,6 +38,7 @@ _TEST_LOCATION = "us-central1" _TEST_PROJECT = "test-project" _TEST_MODEL = "gemini-1.0-pro" +_TEST_SYSTEM_INSTRUCTION = "You are a helpful bot." def place_tool_query( @@ -173,6 +174,7 @@ def test_initialization_with_tools(self, mock_chatvertexai): ] agent = reasoning_engines.LangchainAgent( model=_TEST_MODEL, + system_instruction=_TEST_SYSTEM_INSTRUCTION, tools=tools, ) for tool, agent_tool in zip(tools, agent._tools): @@ -255,11 +257,6 @@ def test_enable_tracing_warning(self, caplog, langchain_instrumentor_none_mock): assert "enable_tracing=True but proceeding with tracing disabled" in caplog.text -class TestConvertToolsOrRaise: - def test_convert_tools_or_raise(self, vertexai_init_mock): - pass - - def _return_input_no_typing(input_): """Returns input back to user.""" return input_ @@ -272,3 +269,20 @@ def test_raise_untyped_input_args(self, vertexai_init_mock): model=_TEST_MODEL, tools=[_return_input_no_typing], ) + + +class TestSystemInstructionAndPromptRaisesErrors: + def test_raise_both_system_instruction_and_prompt_error(self, vertexai_init_mock): + with pytest.raises( + ValueError, + match=r"Only one of `prompt` or `system_instruction` should be specified.", + ): + reasoning_engines.LangchainAgent( + model=_TEST_MODEL, + system_instruction=_TEST_SYSTEM_INSTRUCTION, + prompt=prompts.ChatPromptTemplate.from_messages( + [ + ("user", "{input}"), + ] + ), + ) diff --git a/tests/unit/vertex_langchain/test_reasoning_engines.py 
b/tests/unit/vertex_langchain/test_reasoning_engines.py index 3f88b761b1..f791b64fbe 100644 --- a/tests/unit/vertex_langchain/test_reasoning_engines.py +++ b/tests/unit/vertex_langchain/test_reasoning_engines.py @@ -12,11 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from absl.testing import parameterized import cloudpickle +import difflib import importlib +import os +import pytest import sys import tarfile -from absl.testing import parameterized +import tempfile from typing import Optional from unittest import mock @@ -31,9 +35,9 @@ from google.cloud.aiplatform_v1beta1.services import reasoning_engine_execution_service from google.cloud.aiplatform_v1beta1.services import reasoning_engine_service from vertexai.preview import reasoning_engines -from vertexai.reasoning_engines import _utils from vertexai.reasoning_engines import _reasoning_engines -import pytest +from vertexai.reasoning_engines import _utils +from google.protobuf import field_mask_pb2 class CapitalizeEngine: @@ -61,6 +65,7 @@ def clone(self): f"{_TEST_PARENT}/reasoningEngines/{_TEST_RESOURCE_ID}" ) _TEST_REASONING_ENGINE_DISPLAY_NAME = "Reasoning Engine Display Name" +_TEST_REASONING_ENGINE_DESCRIPTION = "Reasoning Engine Description" _TEST_GCS_DIR_NAME = _reasoning_engines._DEFAULT_GCS_DIR_NAME _TEST_BLOB_FILENAME = _reasoning_engines._BLOB_FILENAME _TEST_REQUIREMENTS_FILE = _reasoning_engines._REQUIREMENTS_FILE @@ -124,6 +129,17 @@ def clone(self): _TEST_REASONING_ENGINE_OBJ.spec.class_methods.append( _TEST_REASONING_ENGINE_QUERY_SCHEMA ) +_TEST_UPDATE_REASONING_ENGINE_OBJ = types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + spec=types.ReasoningEngineSpec( + package_spec=types.ReasoningEngineSpec.PackageSpec( + pickle_object_gcs_uri=_TEST_REASONING_ENGINE_GCS_URI, + ), + ), +) +_TEST_UPDATE_REASONING_ENGINE_OBJ.spec.class_methods.append( + _TEST_REASONING_ENGINE_QUERY_SCHEMA +) 
_TEST_REASONING_ENGINE_QUERY_REQUEST = types.QueryReasoningEngineRequest( name=_TEST_REASONING_ENGINE_RESOURCE_NAME, input={"query": _TEST_QUERY_PROMPT}, @@ -131,6 +147,32 @@ def clone(self): _TEST_REASONING_ENGINE_QUERY_RESPONSE = {} _TEST_REASONING_ENGINE_OPERATION_SCHEMAS = [] _TEST_REASONING_ENGINE_SYS_VERSION = "3.10" +_TEST_REASONING_ENGINE_EXTRA_PACKAGE = "fake.py" + + +def _create_empty_fake_package(package_name: str) -> str: + """Creates a temporary directory structure representing an empty fake Python package. + + Args: + package_name (str): The name of the fake package. + + Returns: + str: The path to the top-level directory of the fake package. + """ + temp_dir = tempfile.mkdtemp() + package_dir = os.path.join(temp_dir, package_name) + os.makedirs(package_dir) + + # Create an empty __init__.py file to mark it as a package + init_path = os.path.join(package_dir, "__init__.py") + open(init_path, "w").close() + + return temp_dir + + +_TEST_REASONING_ENGINE_EXTRA_PACKAGE_PATH = _create_empty_fake_package( + _TEST_REASONING_ENGINE_EXTRA_PACKAGE +) @pytest.fixture(scope="module") @@ -217,6 +259,16 @@ def create_reasoning_engine_mock(): yield create_reasoning_engine_mock +# Function scope is required for the pytest parameterized tests. +@pytest.fixture(scope="function") +def update_reasoning_engine_mock(): + with mock.patch.object( + reasoning_engine_service.ReasoningEngineServiceClient, + "update_reasoning_engine", + ) as update_reasoning_engine_mock: + yield update_reasoning_engine_mock + + @pytest.fixture(scope="module") def delete_reasoning_engine_mock(): with mock.patch.object( @@ -397,6 +449,154 @@ def test_create_reasoning_engine_requirements_from_file( retry=_TEST_RETRY, ) + # pytest does not allow absl.testing.parameterized.named_parameters. 
+ @pytest.mark.parametrize( + "test_case_name, test_kwargs, want_request", + [ + ( + "Update the requirements", + {"requirements": _TEST_REASONING_ENGINE_REQUIREMENTS}, + types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + spec=types.ReasoningEngineSpec( + package_spec=types.ReasoningEngineSpec.PackageSpec( + requirements_gcs_uri=( + _TEST_REASONING_ENGINE_REQUIREMENTS_GCS_URI + ), + ), + ), + ), + update_mask=field_mask_pb2.FieldMask( + paths=["spec.package_spec.requirements_gcs_uri"] + ), + ), + ), + ( + "Update the extra_packages", + {"extra_packages": [_TEST_REASONING_ENGINE_EXTRA_PACKAGE_PATH]}, + types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + spec=types.ReasoningEngineSpec( + package_spec=types.ReasoningEngineSpec.PackageSpec( + dependency_files_gcs_uri=( + _TEST_REASONING_ENGINE_DEPENDENCY_FILES_GCS_URI + ), + ), + ), + ), + update_mask=field_mask_pb2.FieldMask( + paths=["spec.package_spec.dependency_files_gcs_uri"] + ), + ), + ), + ( + "Update the reasoning_engine", + {"reasoning_engine": CapitalizeEngine()}, + types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=_TEST_UPDATE_REASONING_ENGINE_OBJ, + update_mask=field_mask_pb2.FieldMask( + paths=[ + "spec.package_spec.pickle_object_gcs_uri", + "spec.class_methods", + ] + ), + ), + ), + ( + "Update the display_name", + {"display_name": _TEST_REASONING_ENGINE_DISPLAY_NAME}, + types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + ), + update_mask=field_mask_pb2.FieldMask(paths=["display_name"]), + ), + ), + ( + "Update the description", + {"description": _TEST_REASONING_ENGINE_DESCRIPTION}, + 
types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + description=_TEST_REASONING_ENGINE_DESCRIPTION, + ), + update_mask=field_mask_pb2.FieldMask(paths=["description"]), + ), + ), + ], + ) + def test_update_reasoning_engine( + self, + test_case_name, + test_kwargs, + want_request, + update_reasoning_engine_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update(**test_kwargs) + assert_called_with_diff( + update_reasoning_engine_mock, + {"request": want_request}, + ) + + @pytest.mark.usefixtures("caplog") + def test_update_reasoning_engine_warn_sys_version( + self, + caplog, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + # Update the reasoning engine's sys_version alone will raise + # `no updates` error, so we need to update the display_name as well. + test_reasoning_engine.update( + sys_version="3.10", display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME + ) + assert "Updated sys_version is not supported." 
in caplog.text + + def test_update_reasoning_engine_requirements_from_file( + self, + update_reasoning_engine_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + with mock.patch( + "builtins.open", + mock.mock_open(read_data="google-cloud-aiplatform==1.29.0"), + ) as mock_file: + test_reasoning_engine.update( + requirements="requirements.txt", + ) + mock_file.assert_called_with("requirements.txt") + assert_called_with_diff( + update_reasoning_engine_mock, + { + "request": types.reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + spec=types.ReasoningEngineSpec( + package_spec=types.ReasoningEngineSpec.PackageSpec( + requirements_gcs_uri=( + _TEST_REASONING_ENGINE_REQUIREMENTS_GCS_URI + ), + ), + ), + ), + update_mask=field_mask_pb2.FieldMask( + paths=["spec.package_spec.requirements_gcs_uri"] + ), + ) + }, + ) + def test_delete_after_create_reasoning_engine( self, create_reasoning_engine_mock, @@ -620,6 +820,135 @@ def test_create_reasoning_engine_with_invalid_query_method( requirements=_TEST_REASONING_ENGINE_REQUIREMENTS, ) + def test_update_reasoning_engine_unspecified_staging_bucket( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + ): + with pytest.raises( + ValueError, + match="Please provide a `staging_bucket`", + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + importlib.reload(initializer) + importlib.reload(aiplatform) + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + ) + test_reasoning_engine.update( + reasoning_engine=InvalidCapitalizeEngineWithoutQueryMethod(), + ) + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + staging_bucket=_TEST_STAGING_BUCKET, + ) + + def 
test_update_reasoning_engine_no_query_method( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises( + TypeError, + match="does not have a callable method named `query`", + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update( + reasoning_engine=InvalidCapitalizeEngineWithoutQueryMethod(), + ) + + def test_update_reasoning_engine_noncallable_query_attribute( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises( + TypeError, + match="does not have a callable method named `query`", + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update( + reasoning_engine=InvalidCapitalizeEngineWithNoncallableQuery(), + ) + + def test_update_reasoning_engine_requirements_ioerror( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises(IOError, match="Failed to read requirements"): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update( + requirements="nonexistent_requirements.txt", + ) + + def test_update_reasoning_engine_nonexistent_extra_packages( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises(FileNotFoundError, match="not found"): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update( + extra_packages=_TEST_REASONING_ENGINE_INVALID_EXTRA_PACKAGES, + ) + + def test_update_reasoning_engine_with_invalid_query_method( + self, + update_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + 
cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises(ValueError, match="Invalid query signature"): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update( + reasoning_engine=InvalidCapitalizeEngineWithoutQuerySelf(), + ) + + def test_update_reasoning_engine_with_no_updates( + self, + update_reasoning_engine_mock, + ): + with pytest.raises( + ValueError, + match=( + "At least one of `reasoning_engine`, `requirements`, " + "`extra_packages`, `display_name`, or `description` " + "must be specified." + ), + ): + test_reasoning_engine = _generate_reasoning_engine_to_update() + test_reasoning_engine.update() + + +def _generate_reasoning_engine_to_update() -> "reasoning_engines.ReasoningEngine": + test_reasoning_engine = reasoning_engines.ReasoningEngine.create(CapitalizeEngine()) + # Resource name is required for the update method. + test_reasoning_engine._gca_resource = types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + ) + return test_reasoning_engine + def place_tool_query( city: str, @@ -639,6 +968,32 @@ def place_photo_query( pass +def assert_called_with_diff(mock_obj, expected_kwargs=None): + """Asserts that the mock object was called with the expected arguments, + using difflib to show any differences. + + Args: + mock_obj: The mock object to check. + expected_kwargs: Expected keyword arguments, or None if not checking. 
+ """ + assert mock_obj.called, ( + f"Expected '{mock_obj._extract_mock_name()}' to be called, ", + "but it was not.", + ) + + _, call_kwargs = mock_obj.call_args_list[0] + diff = "\n".join( + difflib.ndiff( + str(call_kwargs or "").splitlines(), str(expected_kwargs or "").splitlines() + ) + ) + assert call_kwargs == expected_kwargs, ( + "Unexpected keyword arguments for " + f"'{mock_obj._extract_mock_name()}'.\n" + f"Diff (-got +want):\n{diff}" + ) + + class TestGenerateSchema(parameterized.TestCase): @parameterized.named_parameters( dict( diff --git a/tests/unit/vertex_rag/test_rag_constants.py b/tests/unit/vertex_rag/test_rag_constants.py index 75edcabfbb..10203e2ec0 100644 --- a/tests/unit/vertex_rag/test_rag_constants.py +++ b/tests/unit/vertex_rag/test_rag_constants.py @@ -15,24 +15,25 @@ # limitations under the License. # -from vertexai.preview.rag.utils.resources import ( - EmbeddingModelConfig, - RagCorpus, - RagFile, - RagResource, -) + from google.cloud import aiplatform + +from vertexai.preview import rag from google.cloud.aiplatform_v1beta1 import ( GoogleDriveSource, RagFileChunkingConfig, ImportRagFilesConfig, ImportRagFilesRequest, ImportRagFilesResponse, + JiraSource as GapicJiraSource, RagCorpus as GapicRagCorpus, RagFile as GapicRagFile, + SlackSource as GapicSlackSource, RagContexts, RetrieveContextsResponse, ) +from google.cloud.aiplatform_v1beta1.types import api_auth +from google.protobuf import timestamp_pb2 TEST_PROJECT = "test-project" @@ -55,10 +56,10 @@ TEST_PROJECT, TEST_REGION ) ) -TEST_EMBEDDING_MODEL_CONFIG = EmbeddingModelConfig( +TEST_EMBEDDING_MODEL_CONFIG = rag.EmbeddingModelConfig( publisher_model="publishers/google/models/textembedding-gecko", ) -TEST_RAG_CORPUS = RagCorpus( +TEST_RAG_CORPUS = rag.RagCorpus( name=TEST_RAG_CORPUS_RESOURCE_NAME, display_name=TEST_CORPUS_DISPLAY_NAME, description=TEST_CORPUS_DISCRIPTION, @@ -144,11 +145,114 @@ display_name=TEST_FILE_DISPLAY_NAME, description=TEST_FILE_DESCRIPTION, ) -TEST_RAG_FILE 
= RagFile( +TEST_RAG_FILE = rag.RagFile( name=TEST_RAG_FILE_RESOURCE_NAME, display_name=TEST_FILE_DISPLAY_NAME, description=TEST_FILE_DESCRIPTION, ) +# Slack sources +TEST_SLACK_CHANNEL_ID = "123" +TEST_SLACK_CHANNEL_ID_2 = "456" +TEST_SLACK_START_TIME = timestamp_pb2.Timestamp() +TEST_SLACK_START_TIME.GetCurrentTime() +TEST_SLACK_END_TIME = timestamp_pb2.Timestamp() +TEST_SLACK_END_TIME.GetCurrentTime() +TEST_SLACK_API_KEY_SECRET_VERSION = ( + "projects/test-project/secrets/test-secret/versions/1" +) +TEST_SLACK_API_KEY_SECRET_VERSION_2 = ( + "projects/test-project/secrets/test-secret/versions/2" +) +TEST_SLACK_SOURCE = rag.SlackChannelsSource( + channels=[ + rag.SlackChannel( + channel_id=TEST_SLACK_CHANNEL_ID, + api_key=TEST_SLACK_API_KEY_SECRET_VERSION, + start_time=TEST_SLACK_START_TIME, + end_time=TEST_SLACK_END_TIME, + ), + rag.SlackChannel( + channel_id=TEST_SLACK_CHANNEL_ID_2, + api_key=TEST_SLACK_API_KEY_SECRET_VERSION_2, + ), + ], +) +TEST_IMPORT_FILES_CONFIG_SLACK_SOURCE = ImportRagFilesConfig( + rag_file_chunking_config=RagFileChunkingConfig( + chunk_size=TEST_CHUNK_SIZE, + chunk_overlap=TEST_CHUNK_OVERLAP, + ) +) +TEST_IMPORT_FILES_CONFIG_SLACK_SOURCE.slack_source.channels = [ + GapicSlackSource.SlackChannels( + channels=[ + GapicSlackSource.SlackChannels.SlackChannel( + channel_id=TEST_SLACK_CHANNEL_ID, + start_time=TEST_SLACK_START_TIME, + end_time=TEST_SLACK_END_TIME, + ), + ], + api_key_config=api_auth.ApiAuth.ApiKeyConfig( + api_key_secret_version=TEST_SLACK_API_KEY_SECRET_VERSION + ), + ), + GapicSlackSource.SlackChannels( + channels=[ + GapicSlackSource.SlackChannels.SlackChannel( + channel_id=TEST_SLACK_CHANNEL_ID_2, + start_time=None, + end_time=None, + ), + ], + api_key_config=api_auth.ApiAuth.ApiKeyConfig( + api_key_secret_version=TEST_SLACK_API_KEY_SECRET_VERSION_2 + ), + ), +] +TEST_IMPORT_REQUEST_SLACK_SOURCE = ImportRagFilesRequest( + parent=TEST_RAG_CORPUS_RESOURCE_NAME, + import_rag_files_config=TEST_IMPORT_FILES_CONFIG_SLACK_SOURCE, 
+) +# Jira sources +TEST_JIRA_EMAIL = "test@test.com" +TEST_JIRA_PROJECT = "test-project" +TEST_JIRA_CUSTOM_QUERY = "test-custom-query" +TEST_JIRA_SERVER_URI = "test.atlassian.net" +TEST_JIRA_API_KEY_SECRET_VERSION = ( + "projects/test-project/secrets/test-secret/versions/1" +) +TEST_JIRA_SOURCE = rag.JiraSource( + queries=[ + rag.JiraQuery( + email=TEST_JIRA_EMAIL, + jira_projects=[TEST_JIRA_PROJECT], + custom_queries=[TEST_JIRA_CUSTOM_QUERY], + api_key=TEST_JIRA_API_KEY_SECRET_VERSION, + server_uri=TEST_JIRA_SERVER_URI, + ) + ], +) +TEST_IMPORT_FILES_CONFIG_JIRA_SOURCE = ImportRagFilesConfig( + rag_file_chunking_config=RagFileChunkingConfig( + chunk_size=TEST_CHUNK_SIZE, + chunk_overlap=TEST_CHUNK_OVERLAP, + ) +) +TEST_IMPORT_FILES_CONFIG_JIRA_SOURCE.jira_source.jira_queries = [ + GapicJiraSource.JiraQueries( + custom_queries=[TEST_JIRA_CUSTOM_QUERY], + projects=[TEST_JIRA_PROJECT], + email=TEST_JIRA_EMAIL, + server_uri=TEST_JIRA_SERVER_URI, + api_key_config=api_auth.ApiAuth.ApiKeyConfig( + api_key_secret_version=TEST_JIRA_API_KEY_SECRET_VERSION + ), + ) +] +TEST_IMPORT_REQUEST_JIRA_SOURCE = ImportRagFilesRequest( + parent=TEST_RAG_CORPUS_RESOURCE_NAME, + import_rag_files_config=TEST_IMPORT_FILES_CONFIG_JIRA_SOURCE, +) # Retrieval TEST_QUERY_TEXT = "What happen to the fox and the dog?" 
@@ -162,11 +266,11 @@ ] ) TEST_RETRIEVAL_RESPONSE = RetrieveContextsResponse(contexts=TEST_CONTEXTS) -TEST_RAG_RESOURCE = RagResource( +TEST_RAG_RESOURCE = rag.RagResource( rag_corpus=TEST_RAG_CORPUS_RESOURCE_NAME, rag_file_ids=[TEST_RAG_FILE_ID], ) -TEST_RAG_RESOURCE_INVALID_NAME = RagResource( +TEST_RAG_RESOURCE_INVALID_NAME = rag.RagResource( rag_corpus="213lkj-1/23jkl/", rag_file_ids=[TEST_RAG_FILE_ID], ) diff --git a/tests/unit/vertex_rag/test_rag_data.py b/tests/unit/vertex_rag/test_rag_data.py index fe3c6bb314..c060e5c1cd 100644 --- a/tests/unit/vertex_rag/test_rag_data.py +++ b/tests/unit/vertex_rag/test_rag_data.py @@ -158,6 +158,14 @@ def import_files_request_eq(returned_request, expected_request): returned_request.import_rag_files_config.google_drive_source.resource_ids == expected_request.import_rag_files_config.google_drive_source.resource_ids ) + assert ( + returned_request.import_rag_files_config.slack_source.channels + == expected_request.import_rag_files_config.slack_source.channels + ) + assert ( + returned_request.import_rag_files_config.jira_source.jira_queries + == expected_request.import_rag_files_config.jira_source.jira_queries + ) @pytest.mark.usefixtures("google_auth_mock") @@ -421,6 +429,24 @@ def test_prepare_import_files_request_invalid_path(self): ) e.match("path must be a Google Cloud Storage uri or a Google Drive url") + def test_prepare_import_files_request_slack_source(self): + request = prepare_import_files_request( + corpus_name=tc.TEST_RAG_CORPUS_RESOURCE_NAME, + source=tc.TEST_SLACK_SOURCE, + chunk_size=tc.TEST_CHUNK_SIZE, + chunk_overlap=tc.TEST_CHUNK_OVERLAP, + ) + import_files_request_eq(request, tc.TEST_IMPORT_REQUEST_SLACK_SOURCE) + + def test_prepare_import_files_request_jira_source(self): + request = prepare_import_files_request( + corpus_name=tc.TEST_RAG_CORPUS_RESOURCE_NAME, + source=tc.TEST_JIRA_SOURCE, + chunk_size=tc.TEST_CHUNK_SIZE, + chunk_overlap=tc.TEST_CHUNK_OVERLAP, + ) + import_files_request_eq(request, 
tc.TEST_IMPORT_REQUEST_JIRA_SOURCE) + def test_set_embedding_model_config_set_both_error(self): embedding_model_config = rag.EmbeddingModelConfig( publisher_model="whatever", diff --git a/vertexai/language_models/_language_models.py b/vertexai/language_models/_language_models.py index 2fcb80b05a..df756d268c 100644 --- a/vertexai/language_models/_language_models.py +++ b/vertexai/language_models/_language_models.py @@ -2092,6 +2092,8 @@ class TextEmbeddingInput: Specifies that the embeddings will be used for question answering. FACT_VERIFICATION Specifies that the embeddings will be used for fact verification. + CODE_RETRIEVAL_QUERY + Specifies that the embeddings will be used for code retrieval. title: Optional identifier of the text content. """ diff --git a/vertexai/preview/evaluation/_base.py b/vertexai/preview/evaluation/_base.py index de777f1aca..337dd16ab8 100644 --- a/vertexai/preview/evaluation/_base.py +++ b/vertexai/preview/evaluation/_base.py @@ -40,6 +40,7 @@ class EvaluationRunConfig: metrics: The list of metric names, or metric bundle names, or Metric instances to evaluate. column_map: The dictionary of column name overrides in the dataset. client: The evaluation service client. + evaluation_service_qps: The custom QPS limit for the evaluation service. retry_timeout: How long to keep retrying the evaluation requests, in seconds. 
""" @@ -47,6 +48,7 @@ class EvaluationRunConfig: metrics: List[Union[str, metrics_base._Metric]] column_map: Dict[str, str] client: gapic_evaluation_services.EvaluationServiceClient + evaluation_service_qps: float retry_timeout: float def validate_dataset_column(self, column_name: str) -> None: diff --git a/vertexai/preview/evaluation/_eval_tasks.py b/vertexai/preview/evaluation/_eval_tasks.py index 41b24ac45f..96a11e835d 100644 --- a/vertexai/preview/evaluation/_eval_tasks.py +++ b/vertexai/preview/evaluation/_eval_tasks.py @@ -255,6 +255,7 @@ def _evaluate_with_experiment( prompt_template: Optional[str] = None, experiment_run_name: Optional[str] = None, response_column_name: Optional[str] = None, + evaluation_service_qps: Optional[float] = None, retry_timeout: float = 600.0, ) -> EvalResult: """Runs an evaluation for the EvalTask with an experiment. @@ -271,6 +272,7 @@ def _evaluate_with_experiment( unique experiment run name is used. response_column_name: The column name of model response in the dataset. If provided, this will override the `response_column_name` of the `EvalTask`. + evaluation_service_qps: The custom QPS limit for the evaluation service. retry_timeout: How long to keep retrying the evaluation requests for the whole evaluation dataset, in seconds. @@ -288,6 +290,7 @@ def _evaluate_with_experiment( content_column_name=self.content_column_name, reference_column_name=self.reference_column_name, response_column_name=response_column_name, + evaluation_service_qps=evaluation_service_qps, retry_timeout=retry_timeout, ) @@ -308,6 +311,7 @@ def evaluate( prompt_template: Optional[str] = None, experiment_run_name: Optional[str] = None, response_column_name: Optional[str] = None, + evaluation_service_qps: Optional[float] = None, retry_timeout: float = 600.0, ) -> EvalResult: """Runs an evaluation for the EvalTask. @@ -324,6 +328,7 @@ def evaluate( unique experiment run name is used. response_column_name: The column name of model response in the dataset. 
If provided, this will override the `response_column_name` of the `EvalTask`. + evaluation_service_qps: The custom QPS limit for the evaluation service. retry_timeout: How long to keep retrying the evaluation requests for the whole evaluation dataset, in seconds. @@ -350,6 +355,7 @@ def evaluate( prompt_template, experiment_run_name, response_column_name, + evaluation_service_qps, retry_timeout, ) metadata._experiment_tracker.set_experiment( @@ -364,6 +370,7 @@ def evaluate( prompt_template, experiment_run_name, response_column_name, + evaluation_service_qps, retry_timeout, ) metadata._experiment_tracker.reset() @@ -373,6 +380,7 @@ def evaluate( prompt_template, experiment_run_name, response_column_name, + evaluation_service_qps, retry_timeout, ) else: @@ -384,6 +392,7 @@ def evaluate( content_column_name=self.content_column_name, reference_column_name=self.reference_column_name, response_column_name=response_column_name, + evaluation_service_qps=evaluation_service_qps, retry_timeout=retry_timeout, ) return eval_result diff --git a/vertexai/preview/evaluation/_evaluation.py b/vertexai/preview/evaluation/_evaluation.py index d0d4d2c56c..0a83c6173e 100644 --- a/vertexai/preview/evaluation/_evaluation.py +++ b/vertexai/preview/evaluation/_evaluation.py @@ -272,7 +272,7 @@ def _generate_response_from_gemini( f"Finish reason: {candidate.finish_reason}.\n" f"Finish message: {candidate.finish_message}.\n" f"Safety ratings: {candidate.safety_ratings}.\n" - "Please adjsut the model safety_settings, or try a different prompt." + "Please adjust the model safety_settings, or try a different prompt." ) return response.candidates[0].content.parts[0].text except Exception: @@ -295,9 +295,7 @@ def _generate_response_from_gemini_model( evaluation_run_config: Evaluation Run Configurations. is_baseline_model: Whether the model is a baseline model for PairwiseMetric. 
""" - max_workers = int( - constants.QuotaLimit.GEMINI_1_0_PRO_GENERATE_CONTENT_REQUESTS_PER_MINUTE / 2 - ) + # Ensure thread safety and avoid race conditions. df = evaluation_run_config.dataset.copy() @@ -310,7 +308,7 @@ def _generate_response_from_gemini_model( constants.Dataset.COMPLETED_PROMPT_COLUMN in evaluation_run_config.dataset.columns ): - with futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + with futures.ThreadPoolExecutor(max_workers=constants.MAX_WORKERS) as executor: for _, row in df.iterrows(): tasks.append( executor.submit( @@ -323,7 +321,7 @@ def _generate_response_from_gemini_model( content_column_name = evaluation_run_config.column_map[ constants.Dataset.CONTENT_COLUMN ] - with futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + with futures.ThreadPoolExecutor(max_workers=constants.MAX_WORKERS) as executor: for _, row in df.iterrows(): tasks.append( executor.submit( @@ -609,9 +607,10 @@ def _compute_metrics( instance_list = [] futures_by_metric = collections.defaultdict(list) - eval_max_workers = constants.QuotaLimit.EVAL_SERVICE_QPS + + rate_limiter = utils.RateLimiter(evaluation_run_config.evaluation_service_qps) with tqdm(total=api_request_count) as pbar: - with futures.ThreadPoolExecutor(max_workers=eval_max_workers) as executor: + with futures.ThreadPoolExecutor(max_workers=constants.MAX_WORKERS) as executor: for idx, row in evaluation_run_config.dataset.iterrows(): row_dict = _compute_custom_metrics(row.to_dict(), custom_metrics) @@ -626,6 +625,7 @@ def _compute_metrics( row_dict=row_dict, evaluation_run_config=evaluation_run_config, ), + rate_limiter=rate_limiter, retry_timeout=evaluation_run_config.retry_timeout, ) future.add_done_callback(lambda _: pbar.update(1)) @@ -686,6 +686,7 @@ def evaluate( response_column_name: str = "response", context_column_name: str = "context", instruction_column_name: str = "instruction", + evaluation_service_qps: Optional[float] = None, retry_timeout: float = 600.0, ) -> 
evaluation_base.EvalResult: """Runs the evaluation for metrics. @@ -712,6 +713,7 @@ def evaluate( not set, default to `context`. instruction_column_name: The column name of the instruction prompt in the dataset. If not set, default to `instruction`. + evaluation_service_qps: The custom QPS limit for the evaluation service. retry_timeout: How long to keep retrying the evaluation requests for the whole evaluation dataset, in seconds. Returns: @@ -741,6 +743,9 @@ def evaluate( constants.Dataset.INSTRUCTION_COLUMN: instruction_column_name, }, client=utils.create_evaluation_service_client(), + evaluation_service_qps=evaluation_service_qps + if evaluation_service_qps + else constants.QuotaLimit.EVAL_SERVICE_QPS, retry_timeout=retry_timeout, ) diff --git a/vertexai/preview/evaluation/constants.py b/vertexai/preview/evaluation/constants.py index f646a69ad5..40fd3c0fa7 100644 --- a/vertexai/preview/evaluation/constants.py +++ b/vertexai/preview/evaluation/constants.py @@ -17,6 +17,10 @@ """Constants for evaluation.""" import dataclasses +# The number of concurrent workers to use for making model inference and +# evaluation requests. +MAX_WORKERS = 100 + @dataclasses.dataclass(frozen=True) class Metric: @@ -193,4 +197,7 @@ class QuotaLimit: # Default queries per minute (QPM) quota for `gemini-1.0-pro` base model. GEMINI_1_0_PRO_GENERATE_CONTENT_REQUESTS_PER_MINUTE = 300 - EVAL_SERVICE_QPS = 10 + # Evaluation Service QPS limit can be computed by + # (GEMINI_1_5_PRO_GENERATE_CONTENT_REQUESTS_QPM / 60 / Number of Samples) + # 0.25 = 300 / 60 / 4 + EVAL_SERVICE_QPS = 0.25 diff --git a/vertexai/preview/evaluation/metrics/_coherence.py b/vertexai/preview/evaluation/metrics/_coherence.py index 560dede0f6..3abbd99532 100644 --- a/vertexai/preview/evaluation/metrics/_coherence.py +++ b/vertexai/preview/evaluation/metrics/_coherence.py @@ -15,9 +15,17 @@ # limitations under the License. 
# from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class `Coherence` will" + " result in an error. Please use string metric name `coherence` or define" + " a PointwiseMetric instead." +) + class Coherence(_base._ModelBasedMetric): """The model-based pointwise metric for Coherence.""" @@ -25,6 +33,7 @@ class Coherence(_base._ModelBasedMetric): _metric_name = constants.Metric.COHERENCE def __init__(self, *, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=Coherence._metric_name, version=version, diff --git a/vertexai/preview/evaluation/metrics/_fluency.py b/vertexai/preview/evaluation/metrics/_fluency.py index cdc15064e5..eb7763a0a7 100644 --- a/vertexai/preview/evaluation/metrics/_fluency.py +++ b/vertexai/preview/evaluation/metrics/_fluency.py @@ -15,9 +15,17 @@ # limitations under the License. # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class `Fluency` will" + " result in an error. Please use string metric name `fluency` or define" + " a PointwiseMetric instead." 
+) + class Fluency(_base._ModelBasedMetric): """The model-based pointwise metric for Fluency.""" @@ -25,6 +33,7 @@ class Fluency(_base._ModelBasedMetric): _metric_name = constants.Metric.FLUENCY def __init__(self, *, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=Fluency._metric_name, version=version, diff --git a/vertexai/preview/evaluation/metrics/_fulfillment.py b/vertexai/preview/evaluation/metrics/_fulfillment.py index a9e179212a..f68218cc98 100644 --- a/vertexai/preview/evaluation/metrics/_fulfillment.py +++ b/vertexai/preview/evaluation/metrics/_fulfillment.py @@ -15,9 +15,17 @@ # limitations under the License. # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class `Fulfillment` will" + " result in an error. Please use string metric name `fulfillment` or define" + " a PointwiseMetric instead." +) + class Fulfillment(_base._ModelBasedMetric): """The model-based pointwise metric for Fulfillment.""" @@ -25,6 +33,7 @@ class Fulfillment(_base._ModelBasedMetric): _metric_name = constants.Metric.FULFILLMENT def __init__(self, *, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=Fulfillment._metric_name, version=version, diff --git a/vertexai/preview/evaluation/metrics/_groundedness.py b/vertexai/preview/evaluation/metrics/_groundedness.py index 4b4fa1f99d..d8f7994a21 100644 --- a/vertexai/preview/evaluation/metrics/_groundedness.py +++ b/vertexai/preview/evaluation/metrics/_groundedness.py @@ -15,9 +15,17 @@ # limitations under the License. 
# from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class `Groundedness` will" + " result in an error. Please use string metric name `groundedness` or define" + " a PointwiseMetric instead." +) + class Groundedness(_base._ModelBasedMetric): """The model-based pointwise metric for Groundedness.""" @@ -25,6 +33,7 @@ class Groundedness(_base._ModelBasedMetric): _metric_name = constants.Metric.GROUNDEDNESS def __init__(self, *, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=Groundedness._metric_name, version=version, diff --git a/vertexai/preview/evaluation/metrics/_instance_evaluation.py b/vertexai/preview/evaluation/metrics/_instance_evaluation.py index de8bc0b422..70277c94c1 100644 --- a/vertexai/preview/evaluation/metrics/_instance_evaluation.py +++ b/vertexai/preview/evaluation/metrics/_instance_evaluation.py @@ -620,11 +620,10 @@ def handle_response( return result -# TODO(b/346659152): Add interface to customize rate limit. -@utils.rate_limit(constants.QuotaLimit.EVAL_SERVICE_QPS) def evaluate_instances( client: gapic_evaluation_services.EvaluationServiceClient, request: gapic_eval_service_types.EvaluateInstancesRequest, + rate_limiter: utils.RateLimiter, retry_timeout: float, ) -> gapic_eval_service_types.EvaluateInstancesResponse: """Evaluates an instance. @@ -632,12 +631,13 @@ def evaluate_instances( Args: client: The client to use for evaluation. request: An EvaluateInstancesRequest. + rate_limiter: The rate limiter to use for evaluation service requests. retry_timeout: How long to keep retrying the evaluation requests, in seconds. Returns: A response from the evaluation service. 
""" - + rate_limiter.sleep_and_advance() return client.evaluate_instances( request=request, retry=api_core.retry.Retry( diff --git a/vertexai/preview/evaluation/metrics/_pairwise_question_answering_quality.py b/vertexai/preview/evaluation/metrics/_pairwise_question_answering_quality.py index 4ce578a8b2..6ad96c3817 100644 --- a/vertexai/preview/evaluation/metrics/_pairwise_question_answering_quality.py +++ b/vertexai/preview/evaluation/metrics/_pairwise_question_answering_quality.py @@ -16,10 +16,19 @@ # from typing import Callable, Optional, Union +import warnings + from vertexai.generative_models import _generative_models from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `PairwiseQuestionAnsweringQuality` will result in an error. Please use" + " string metric name `pairwise_question_answering_quality` or define a" + " PairwiseMetric instead." 
+) + class PairwiseQuestionAnsweringQuality(_base.PairwiseMetric): """The Side-by-side(SxS) Pairwise Metric for Question Answering Quality.""" @@ -35,6 +44,7 @@ def __init__( use_reference: bool = False, version: Optional[int] = None ): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=PairwiseQuestionAnsweringQuality._metric_name, baseline_model=baseline_model, diff --git a/vertexai/preview/evaluation/metrics/_pairwise_summarization_quality.py b/vertexai/preview/evaluation/metrics/_pairwise_summarization_quality.py index 6f1737949a..4ecc38f26a 100644 --- a/vertexai/preview/evaluation/metrics/_pairwise_summarization_quality.py +++ b/vertexai/preview/evaluation/metrics/_pairwise_summarization_quality.py @@ -16,10 +16,19 @@ # from typing import Callable, Optional, Union +import warnings + from vertexai.generative_models import _generative_models from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `PairwiseSummarizationQuality` will result in an error. Please use" + " string metric name `pairwise_summarization_quality` or define a" + " PairwiseMetric instead." 
+) + class PairwiseSummarizationQuality(_base.PairwiseMetric): """The Side-by-side(SxS) Pairwise Metric for summarization quality.""" @@ -35,6 +44,7 @@ def __init__( use_reference: bool = False, version: Optional[int] = None ): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=PairwiseSummarizationQuality._metric_name, baseline_model=baseline_model, diff --git a/vertexai/preview/evaluation/metrics/_question_answering_correctness.py b/vertexai/preview/evaluation/metrics/_question_answering_correctness.py index e21e51a221..9e5b6ed4fb 100644 --- a/vertexai/preview/evaluation/metrics/_question_answering_correctness.py +++ b/vertexai/preview/evaluation/metrics/_question_answering_correctness.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `QuestionAnsweringCorrectness` will result in an error. Please use" + " string metric name `question_answering_correctness` or define a" + " PointwiseMetric instead." 
+) + class QuestionAnsweringCorrectness(_base._ModelBasedMetric): """The model-based pointwise metric for Question Answering Correctness.""" @@ -26,6 +35,7 @@ class QuestionAnsweringCorrectness(_base._ModelBasedMetric): _metric_name = constants.Metric.QUESTION_ANSWERING_CORRECTNESS def __init__(self, *, use_reference: bool = True, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=QuestionAnsweringCorrectness._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_question_answering_helpfulness.py b/vertexai/preview/evaluation/metrics/_question_answering_helpfulness.py index 79b0d62778..8a1a5f541e 100644 --- a/vertexai/preview/evaluation/metrics/_question_answering_helpfulness.py +++ b/vertexai/preview/evaluation/metrics/_question_answering_helpfulness.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `QuestionAnsweringHelpfulness` will result in an error. Please use" + " string metric name `question_answering_helpfulness` or define a" + " PointwiseMetric instead." 
+) + class QuestionAnsweringHelpfulness(_base._ModelBasedMetric): """The model-based pointwise metric for Question Answering Helpfulness.""" @@ -26,6 +35,7 @@ class QuestionAnsweringHelpfulness(_base._ModelBasedMetric): _metric_name = constants.Metric.QUESTION_ANSWERING_HELPFULNESS def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=QuestionAnsweringHelpfulness._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_question_answering_quality.py b/vertexai/preview/evaluation/metrics/_question_answering_quality.py index 90cfdcc1cd..a826635797 100644 --- a/vertexai/preview/evaluation/metrics/_question_answering_quality.py +++ b/vertexai/preview/evaluation/metrics/_question_answering_quality.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `QuestionAnsweringQuality` will result in an error. Please use" + " string metric name `question_answering_quality` or define a" + " PointwiseMetric instead." 
+) + class QuestionAnsweringQuality(_base._ModelBasedMetric): """The model-based pointwise metric for Question Answering Quality.""" @@ -26,6 +35,7 @@ class QuestionAnsweringQuality(_base._ModelBasedMetric): _metric_name = constants.Metric.QUESTION_ANSWERING_QUALITY def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=QuestionAnsweringQuality._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_question_answering_relevance.py b/vertexai/preview/evaluation/metrics/_question_answering_relevance.py index 4ec3160726..2459cbba27 100644 --- a/vertexai/preview/evaluation/metrics/_question_answering_relevance.py +++ b/vertexai/preview/evaluation/metrics/_question_answering_relevance.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `QuestionAnsweringRelevance` will result in an error. Please use" + " string metric name `question_answering_relevance` or define a" + " PointwiseMetric instead." 
+) + class QuestionAnsweringRelevance(_base._ModelBasedMetric): """The model-based pointwise metric for Question Answering Relevance.""" @@ -26,6 +35,7 @@ class QuestionAnsweringRelevance(_base._ModelBasedMetric): _metric_name = constants.Metric.QUESTION_ANSWERING_RELEVANCE def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=QuestionAnsweringRelevance._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_safety.py b/vertexai/preview/evaluation/metrics/_safety.py index 2f952a15b4..4b7b3fbc6c 100644 --- a/vertexai/preview/evaluation/metrics/_safety.py +++ b/vertexai/preview/evaluation/metrics/_safety.py @@ -15,9 +15,18 @@ # limitations under the License. # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `Safety` will result in an error. Please use" + " string metric name `safety` or define a" + " PointwiseMetric instead." 
+) + class Safety(_base._ModelBasedMetric): """The model-based pointwise metric for Safety.""" @@ -25,6 +34,7 @@ class Safety(_base._ModelBasedMetric): _metric_name = constants.Metric.SAFETY def __init__(self, *, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=Safety._metric_name, version=version, diff --git a/vertexai/preview/evaluation/metrics/_summarization_helpfulness.py b/vertexai/preview/evaluation/metrics/_summarization_helpfulness.py index a538fd8858..9bc381c768 100644 --- a/vertexai/preview/evaluation/metrics/_summarization_helpfulness.py +++ b/vertexai/preview/evaluation/metrics/_summarization_helpfulness.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `SummarizationHelpfulness` will result in an error. Please use" + " string metric name `summarization_helpfulness` or define a" + " PointwiseMetric instead." 
+) + class SummarizationHelpfulness(_base._ModelBasedMetric): """The model-based pointwise metric for Summarization Helpfulness.""" @@ -26,6 +35,7 @@ class SummarizationHelpfulness(_base._ModelBasedMetric): _metric_name = constants.Metric.SUMMARIZATION_HELPFULNESS def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=SummarizationHelpfulness._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_summarization_quality.py b/vertexai/preview/evaluation/metrics/_summarization_quality.py index 9d19f9f4f9..540a23dbba 100644 --- a/vertexai/preview/evaluation/metrics/_summarization_quality.py +++ b/vertexai/preview/evaluation/metrics/_summarization_quality.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `SummarizationQuality` will result in an error. Please use" + " string metric name `summarization_quality` or define a" + " PointwiseMetric instead." 
+) + class SummarizationQuality(_base._ModelBasedMetric): """The model-based pointwise metric for Summarization Quality.""" @@ -26,6 +35,7 @@ class SummarizationQuality(_base._ModelBasedMetric): _metric_name = constants.Metric.SUMMARIZATION_QUALITY def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=SummarizationQuality._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/metrics/_summarization_verbosity.py b/vertexai/preview/evaluation/metrics/_summarization_verbosity.py index 75bcfc3f8f..03fbea59b3 100644 --- a/vertexai/preview/evaluation/metrics/_summarization_verbosity.py +++ b/vertexai/preview/evaluation/metrics/_summarization_verbosity.py @@ -16,9 +16,18 @@ # from typing import Optional +import warnings + from vertexai.preview.evaluation import constants from vertexai.preview.evaluation.metrics import _base +_DEPRECATION_WARNING_MESSAGE = ( + "After google-cloud-aiplatform>1.63.0, using metric class" + " `SummarizationVerbosity` will result in an error. Please use" + " string metric name `summarization_verbosity` or define a" + " PointwiseMetric instead." 
+) + class SummarizationVerbosity(_base._ModelBasedMetric): """The model-based pointwise metric for Summarization Verbosity.""" @@ -26,6 +35,7 @@ class SummarizationVerbosity(_base._ModelBasedMetric): _metric_name = constants.Metric.SUMMARIZATION_VERBOSITY def __init__(self, *, use_reference: bool = False, version: Optional[int] = None): + warnings.warn(message=_DEPRECATION_WARNING_MESSAGE) super().__init__( metric=SummarizationVerbosity._metric_name, use_reference=use_reference, diff --git a/vertexai/preview/evaluation/utils.py b/vertexai/preview/evaluation/utils.py index f344ec3134..c749850341 100644 --- a/vertexai/preview/evaluation/utils.py +++ b/vertexai/preview/evaluation/utils.py @@ -73,7 +73,7 @@ def __init__(self, rate: Optional[float] = None): Raises: ValueError: If the rate is not positive. """ - if rate <= 0: + if not rate or rate <= 0: raise ValueError("Rate must be a positive number") self.seconds_per_event = 1.0 / rate self.last = time.time() - self.seconds_per_event diff --git a/vertexai/preview/rag/__init__.py b/vertexai/preview/rag/__init__.py index 076dd63a95..fff380b359 100644 --- a/vertexai/preview/rag/__init__.py +++ b/vertexai/preview/rag/__init__.py @@ -38,7 +38,13 @@ ) from vertexai.preview.rag.utils.resources import ( EmbeddingModelConfig, + JiraSource, + JiraQuery, + RagCorpus, + RagFile, RagResource, + SlackChannel, + SlackChannelsSource, ) @@ -58,4 +64,10 @@ "Retrieval", "VertexRagStore", "RagResource", + "RagFile", + "RagCorpus", + "JiraSource", + "JiraQuery", + "SlackChannel", + "SlackChannelsSource", ) diff --git a/vertexai/preview/rag/rag_data.py b/vertexai/preview/rag/rag_data.py index 80bb36dcd2..b370878d4b 100644 --- a/vertexai/preview/rag/rag_data.py +++ b/vertexai/preview/rag/rag_data.py @@ -44,8 +44,10 @@ ) from vertexai.preview.rag.utils.resources import ( EmbeddingModelConfig, + JiraSource, RagCorpus, RagFile, + SlackChannelsSource, ) @@ -59,10 +61,11 @@ def create_corpus( Example usage: ``` import vertexai + from 
vertexai.preview import rag vertexai.init(project="my-project") - rag_corpus = vertexai.preview.rag.create_corpus( + rag_corpus = rag.create_corpus( display_name="my-corpus-1", ) ``` @@ -133,6 +136,7 @@ def list_corpora( Example usage: ``` import vertexai + from vertexai.preview import rag vertexai.init(project="my-project") @@ -202,10 +206,11 @@ def upload_file( ``` import vertexai + from vertexai.preview import rag vertexai.init(project="my-project") - rag_file = vertexai.preview.rag.upload_file( + rag_file = rag.upload_file( corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1", display_name="my_file.txt", path="usr/home/my_file.txt", @@ -270,7 +275,8 @@ def upload_file( def import_files( corpus_name: str, - paths: Sequence[str], + paths: Optional[Sequence[str]] = None, + source: Optional[Union[SlackChannelsSource, JiraSource]] = None, chunk_size: int = 1024, chunk_overlap: int = 200, timeout: int = 600, @@ -283,19 +289,55 @@ def import_files( ``` import vertexai + from vertexai.preview import rag + from google.protobuf import timestamp_pb2 vertexai.init(project="my-project") # Google Drive example - paths = ["https://drive.google.com/file/123", "https://drive.google.com/file/456"] + paths = [ + "https://drive.google.com/file/d/123", + "https://drive.google.com/drive/folders/456" + ] # Google Cloud Storage example paths = ["gs://my_bucket/my_files_dir", ...] 
- response = vertexai.preview.rag.import_files( + response = rag.import_files( corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1", paths=paths, chunk_size=512, chunk_overlap=100, ) + + # Slack example + start_time = timestamp_pb2.Timestamp() + start_time.FromJsonString('2020-12-31T21:33:44Z') + end_time = timestamp_pb2.Timestamp() + end_time.GetCurrentTime() + source = rag.SlackChannelsSource( + channels = [ + SlackChannel("channel1", "api_key1"), + SlackChannel("channel2", "api_key2", start_time, end_time) + ], + ) + # Jira Example + jira_query = rag.JiraQuery( + email="xxx@yyy.com", + jira_projects=["project1", "project2"], + custom_queries=["query1", "query2"], + api_key="api_key", + server_uri="server.atlassian.net" + ) + source = rag.JiraSource( + queries=[jira_query], + ) + + response = rag.import_files( + corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1", + source=source, + chunk_size=512, + chunk_overlap=100, + ) + # Return the number of imported RagFiles after completion. print(response.imported_rag_files_count) @@ -304,10 +346,12 @@ def import_files( corpus_name: The name of the RagCorpus resource into which to import files. Format: ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` or ``{rag_corpus}``. - paths: A list of uris. Elligible uris will be Google Cloud Storage + paths: A list of uris. Eligible uris will be Google Cloud Storage directory ("gs://my-bucket/my_dir") or a Google Drive url for file (https://drive.google.com/file/... or folder "https://drive.google.com/corp/drive/folders/..."). + source: The source of the Slack or Jira import. + Must be either a SlackChannelsSource or JiraSource. chunk_size: The size of the chunks. chunk_overlap: The overlap between chunks. max_embedding_requests_per_min: @@ -323,10 +367,15 @@ def import_files( Returns: ImportRagFilesResponse. 
""" + if source is not None and paths is not None: + raise ValueError("Only one of source or paths must be passed in at a time") + if source is None and paths is None: + raise ValueError("One of source or paths must be passed in") corpus_name = _gapic_utils.get_corpus_name(corpus_name) request = _gapic_utils.prepare_import_files_request( corpus_name=corpus_name, paths=paths, + source=source, chunk_size=chunk_size, chunk_overlap=chunk_overlap, max_embedding_requests_per_min=max_embedding_requests_per_min, @@ -342,7 +391,8 @@ def import_files( async def import_files_async( corpus_name: str, - paths: Sequence[str], + paths: Optional[Sequence[str]] = None, + source: Optional[Union[SlackChannelsSource, JiraSource]] = None, chunk_size: int = 1024, chunk_overlap: int = 200, max_embedding_requests_per_min: int = 1000, @@ -354,21 +404,56 @@ async def import_files_async( ``` import vertexai + from vertexai.preview import rag + from google.protobuf import timestamp_pb2 vertexai.init(project="my-project") # Google Drive example - paths = ["https://drive.google.com/file/123", "https://drive.google.com/file/456"] + paths = [ + "https://drive.google.com/file/d/123", + "https://drive.google.com/drive/folders/456" + ] # Google Cloud Storage example paths = ["gs://my_bucket/my_files_dir", ...] 
- response = await vertexai.preview.rag.import_files_async( + response = await rag.import_files_async( corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1", paths=paths, chunk_size=512, chunk_overlap=100, ) + # Slack example + start_time = timestamp_pb2.Timestamp() + start_time.FromJsonString('2020-12-31T21:33:44Z') + end_time = timestamp_pb2.Timestamp() + end_time.GetCurrentTime() + source = rag.SlackChannelsSource( + channels = [ + SlackChannel("channel1", "api_key1"), + SlackChannel("channel2", "api_key2", start_time, end_time) + ], + ) + # Jira Example + jira_query = rag.JiraQuery( + email="xxx@yyy.com", + jira_projects=["project1", "project2"], + custom_queries=["query1", "query2"], + api_key="api_key", + server_uri="server.atlassian.net" + ) + source = rag.JiraSource( + queries=[jira_query], + ) + + response = await rag.import_files_async( + corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1", + source=source, + chunk_size=512, + chunk_overlap=100, + ) + # Get the result. await response.result() @@ -377,10 +462,12 @@ async def import_files_async( corpus_name: The name of the RagCorpus resource into which to import files. Format: ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` or ``{rag_corpus}``. - paths: A list of uris. Elligible uris will be Google Cloud Storage + paths: A list of uris. Eligible uris will be Google Cloud Storage directory ("gs://my-bucket/my_dir") or a Google Drive url for file (https://drive.google.com/file/... or folder "https://drive.google.com/corp/drive/folders/..."). + source: The source of the Slack or Jira import. + Must be either a SlackChannelsSource or JiraSource. chunk_size: The size of the chunks. chunk_overlap: The overlap between chunks. max_embedding_requests_per_min: @@ -395,10 +482,15 @@ async def import_files_async( Returns: operation_async.AsyncOperation. 
""" + if source is not None and paths is not None: + raise ValueError("Only one of source or paths must be passed in at a time") + if source is None and paths is None: + raise ValueError("One of source or paths must be passed in") corpus_name = _gapic_utils.get_corpus_name(corpus_name) request = _gapic_utils.prepare_import_files_request( corpus_name=corpus_name, paths=paths, + source=source, chunk_size=chunk_size, chunk_overlap=chunk_overlap, max_embedding_requests_per_min=max_embedding_requests_per_min, diff --git a/vertexai/preview/rag/utils/_gapic_utils.py b/vertexai/preview/rag/utils/_gapic_utils.py index eeb729848b..a46fad553a 100644 --- a/vertexai/preview/rag/utils/_gapic_utils.py +++ b/vertexai/preview/rag/utils/_gapic_utils.py @@ -15,7 +15,8 @@ # limitations under the License. # import re -from typing import Any, Dict, Sequence, Union +from typing import Any, Dict, Optional, Sequence, Union +from google.cloud.aiplatform_v1beta1.types import api_auth from google.cloud.aiplatform_v1beta1 import ( RagEmbeddingModelConfig, GoogleDriveSource, @@ -24,6 +25,8 @@ RagFileChunkingConfig, RagCorpus as GapicRagCorpus, RagFile as GapicRagFile, + SlackSource as GapicSlackSource, + JiraSource as GapicJiraSource, ) from google.cloud.aiplatform import initializer from google.cloud.aiplatform.utils import ( @@ -35,6 +38,8 @@ EmbeddingModelConfig, RagCorpus, RagFile, + SlackChannelsSource, + JiraSource, ) @@ -153,9 +158,62 @@ def convert_path_to_resource_id( ) +def convert_source_for_rag_import( + source: Union[SlackChannelsSource, JiraSource] +) -> Union[GapicSlackSource, GapicJiraSource]: + """Converts a SlackChannelsSource or JiraSource to a GapicSlackSource or GapicJiraSource.""" + if isinstance(source, SlackChannelsSource): + result_source_channels = [] + for channel in source.channels: + api_key = channel.api_key + cid = channel.channel_id + start_time = channel.start_time + end_time = channel.end_time + result_channels = GapicSlackSource.SlackChannels( + channels=[ + 
GapicSlackSource.SlackChannels.SlackChannel( + channel_id=cid, + start_time=start_time, + end_time=end_time, + ) + ], + api_key_config=api_auth.ApiAuth.ApiKeyConfig( + api_key_secret_version=api_key + ), + ) + result_source_channels.append(result_channels) + return GapicSlackSource( + channels=result_source_channels, + ) + elif isinstance(source, JiraSource): + result_source_queries = [] + for query in source.queries: + api_key = query.api_key + custom_queries = query.custom_queries + projects = query.jira_projects + email = query.email + server_uri = query.server_uri + result_query = GapicJiraSource.JiraQueries( + custom_queries=custom_queries, + projects=projects, + email=email, + server_uri=server_uri, + api_key_config=api_auth.ApiAuth.ApiKeyConfig( + api_key_secret_version=api_key + ), + ) + result_source_queries.append(result_query) + return GapicJiraSource( + jira_queries=result_source_queries, + ) + else: + raise TypeError("source must be a SlackChannelsSource or JiraSource.") + + def prepare_import_files_request( corpus_name: str, - paths: Sequence[str], + paths: Optional[Sequence[str]] = None, + source: Optional[Union[SlackChannelsSource, JiraSource]] = None, chunk_size: int = 1024, chunk_overlap: int = 200, max_embedding_requests_per_min: int = 1000, @@ -174,22 +232,28 @@ def prepare_import_files_request( max_embedding_requests_per_min=max_embedding_requests_per_min, ) - uris = [] - resource_ids = [] - for p in paths: - output = convert_path_to_resource_id(p) - if isinstance(output, str): - uris.append(p) - else: - resource_ids.append(output) - - if uris: - import_rag_files_config.gcs_source.uris = uris - if resource_ids: - google_drive_source = GoogleDriveSource( - resource_ids=resource_ids, - ) - import_rag_files_config.google_drive_source = google_drive_source + if source is not None: + gapic_source = convert_source_for_rag_import(source) + if isinstance(gapic_source, GapicSlackSource): + import_rag_files_config.slack_source = gapic_source + if 
isinstance(gapic_source, GapicJiraSource): + import_rag_files_config.jira_source = gapic_source + else: + uris = [] + resource_ids = [] + for p in paths: + output = convert_path_to_resource_id(p) + if isinstance(output, str): + uris.append(p) + else: + resource_ids.append(output) + if uris: + import_rag_files_config.gcs_source.uris = uris + if resource_ids: + google_drive_source = GoogleDriveSource( + resource_ids=resource_ids, + ) + import_rag_files_config.google_drive_source = google_drive_source request = ImportRagFilesRequest( parent=corpus_name, import_rag_files_config=import_rag_files_config diff --git a/vertexai/preview/rag/utils/resources.py b/vertexai/preview/rag/utils/resources.py index 0e18b79f51..1b5af451f6 100644 --- a/vertexai/preview/rag/utils/resources.py +++ b/vertexai/preview/rag/utils/resources.py @@ -16,7 +16,9 @@ # import dataclasses -from typing import List, Optional +from typing import List, Optional, Sequence + +from google.protobuf import timestamp_pb2 @dataclasses.dataclass @@ -102,3 +104,66 @@ class RagResource: rag_corpus: Optional[str] = None rag_file_ids: Optional[List[str]] = None + + +@dataclasses.dataclass +class SlackChannel: + """SlackChannel. + + Attributes: + channel_id: The Slack channel ID. + api_key: The SecretManager resource name for the Slack API token. Format: + ``projects/{project}/secrets/{secret}/versions/{version}`` + See: https://api.slack.com/tutorials/tracks/getting-a-token. + start_time: The starting timestamp for messages to import. + end_time: The ending timestamp for messages to import. + """ + + channel_id: str + api_key: str + start_time: Optional[timestamp_pb2.Timestamp] = None + end_time: Optional[timestamp_pb2.Timestamp] = None + + +@dataclasses.dataclass +class SlackChannelsSource: + """SlackChannelsSource. + + Attributes: + channels: The Slack channels. + """ + + channels: Sequence[SlackChannel] + + +@dataclasses.dataclass +class JiraQuery: + """JiraQuery. + + Attributes: + email: The Jira email address. 
+ jira_projects: A list of Jira projects to import in their entirety. + custom_queries: A list of custom JQL Jira queries to import. + api_key: The SecretManager version resource name for Jira API access. Format: + ``projects/{project}/secrets/{secret}/versions/{version}`` + See: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/ + server_uri: The Jira server URI. Format: + ``{server}.atlassian.net`` + """ + + email: str + jira_projects: Sequence[str] + custom_queries: Sequence[str] + api_key: str + server_uri: str + + +@dataclasses.dataclass +class JiraSource: + """JiraSource. + + Attributes: + queries: The Jira queries. + """ + + queries: Sequence[JiraQuery] diff --git a/vertexai/preview/reasoning_engines/templates/langchain.py b/vertexai/preview/reasoning_engines/templates/langchain.py index fbaa5a41dc..1b48dc64b2 100644 --- a/vertexai/preview/reasoning_engines/templates/langchain.py +++ b/vertexai/preview/reasoning_engines/templates/langchain.py @@ -114,6 +114,7 @@ def _default_model_builder( def _default_runnable_builder( model: "BaseLanguageModel", *, + system_instruction: Optional[str] = None, tools: Optional[Sequence["_ToolLike"]] = None, prompt: Optional["RunnableSerializable"] = None, output_parser: Optional["RunnableSerializable"] = None, @@ -131,7 +132,10 @@ def _default_runnable_builder( # user would reflect that is by setting chat_history (which defaults to # None). 
has_history: bool = chat_history is not None - prompt = prompt or _default_prompt(has_history) + prompt = prompt or _default_prompt( + has_history=has_history, + system_instruction=system_instruction, + ) output_parser = output_parser or _default_output_parser() model_tool_kwargs = model_tool_kwargs or {} agent_executor_kwargs = agent_executor_kwargs or {} @@ -162,7 +166,10 @@ def _default_runnable_builder( return agent_executor -def _default_prompt(has_history: bool) -> "RunnableSerializable": +def _default_prompt( + has_history: bool, + system_instruction: Optional[str] = None, +) -> "RunnableSerializable": from langchain_core import prompts try: @@ -173,6 +180,10 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable": format_to_openai_tool_messages as format_to_tool_messages, ) + system_instructions = [] + if system_instruction: + system_instructions = [("system", system_instruction)] + if has_history: return { "history": lambda x: x["history"], @@ -181,7 +192,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable": lambda x: format_to_tool_messages(x["intermediate_steps"]) ), } | prompts.ChatPromptTemplate.from_messages( - [ + system_instructions + + [ prompts.MessagesPlaceholder(variable_name="history"), ("user", "{input}"), prompts.MessagesPlaceholder(variable_name="agent_scratchpad"), @@ -194,7 +206,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable": lambda x: format_to_tool_messages(x["intermediate_steps"]) ), } | prompts.ChatPromptTemplate.from_messages( - [ + system_instructions + + [ ("user", "{input}"), prompts.MessagesPlaceholder(variable_name="agent_scratchpad"), ] @@ -265,6 +278,7 @@ def __init__( self, model: str, *, + system_instruction: Optional[str] = None, prompt: Optional["RunnableSerializable"] = None, tools: Optional[Sequence["_ToolLike"]] = None, output_parser: Optional["RunnableSerializable"] = None, @@ -319,6 +333,9 @@ def __init__( Args: model (str): Optional. The name of the model (e.g. 
"gemini-1.0-pro"). + system_instruction (str): + Optional. The system instruction to use for the agent. This + argument should not be specified if `prompt` is specified. prompt (langchain_core.runnables.RunnableSerializable): Optional. The prompt template for the model. Defaults to a ChatPromptTemplate. @@ -394,6 +411,7 @@ def __init__( False. Raises: + ValueError: If both `prompt` and `system_instruction` are specified. TypeError: If there is an invalid tool (e.g. function with an input that did not specify its type). """ @@ -407,7 +425,14 @@ def __init__( # they are deployed. _validate_tools(tools) self._tools = tools + if prompt and system_instruction: + raise ValueError( + "Only one of `prompt` or `system_instruction` should be specified. " + "Consider incorporating the system instruction into the prompt " + "rather than passing it separately as an argument." + ) self._model_name = model + self._system_instruction = system_instruction self._prompt = prompt self._output_parser = output_parser self._chat_history = chat_history @@ -528,6 +553,7 @@ def set_up(self): prompt=self._prompt, model=self._model, tools=self._tools, + system_instruction=self._system_instruction, output_parser=self._output_parser, chat_history=self._chat_history, model_tool_kwargs=self._model_tool_kwargs, diff --git a/vertexai/reasoning_engines/_reasoning_engines.py b/vertexai/reasoning_engines/_reasoning_engines.py index 9b0587a3d7..05c37cc685 100644 --- a/vertexai/reasoning_engines/_reasoning_engines.py +++ b/vertexai/reasoning_engines/_reasoning_engines.py @@ -20,14 +20,17 @@ import sys import tarfile import typing -from typing import Optional, Protocol, Sequence, Union +from typing import Optional, Protocol, Sequence, Union, List from google.api_core import exceptions +from google.cloud import storage from google.cloud.aiplatform import base from google.cloud.aiplatform import initializer from google.cloud.aiplatform import utils as aip_utils from google.cloud.aiplatform_v1beta1 import 
types +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_service from vertexai.reasoning_engines import _utils +from google.protobuf import field_mask_pb2 _LOGGER = base.Logger(__name__) @@ -179,71 +182,29 @@ def create( ValueError: If the `staging_bucket` does not start with "gs://". FileNotFoundError: If `extra_packages` includes a file or directory that does not exist. + IOError: If requirements is a string that corresponds to a + nonexistent file. """ if not sys_version: sys_version = f"{sys.version_info.major}.{sys.version_info.minor}" - if sys_version not in _SUPPORTED_PYTHON_VERSIONS: - raise ValueError( - f"Unsupported python version: {sys_version}. ReasoningEngine " - f"only supports {_SUPPORTED_PYTHON_VERSIONS} at the moment." - ) + _validate_sys_version_or_raise(sys_version) + reasoning_engine = _validate_reasoning_engine_or_raise(reasoning_engine) + requirements = _validate_requirements_or_raise(requirements) + extra_packages = _validate_extra_packages_or_raise(extra_packages) + if reasoning_engine_name: _LOGGER.warning( "ReasoningEngine does not support user-defined resource IDs at " f"the moment. Therefore {reasoning_engine_name=} would be " "ignored and a random ID will be generated instead." ) - if sys_version != f"{sys.version_info.major}.{sys.version_info.minor}": - _LOGGER.warning( - f"{sys_version=} is inconsistent with {sys.version_info=}. " - "This might result in issues with deployment, and should only " - "be used as a workaround for advanced cases." 
- ) sdk_resource = cls.__new__(cls) base.VertexAiResourceNounWithFutureManager.__init__( sdk_resource, resource_name=reasoning_engine_name, ) staging_bucket = initializer.global_config.staging_bucket - if not staging_bucket: - raise ValueError( - "Please provide a `staging_bucket` in `vertexai.init(...)`" - ) - if not staging_bucket.startswith("gs://"): - raise ValueError(f"{staging_bucket=} must start with `gs://`") - if not ( - hasattr(reasoning_engine, "query") and callable(reasoning_engine.query) - ): - raise TypeError( - "reasoning_engine does not have a callable method named `query`" - ) - try: - inspect.signature(reasoning_engine.query) - except ValueError as err: - raise ValueError( - "Invalid query signature. This might be due to a missing " - "`self` argument in the reasoning_engine.query method." - ) from err - if isinstance(reasoning_engine, Cloneable): - # Avoid undeployable ReasoningChain states. - reasoning_engine = reasoning_engine.clone() - if isinstance(requirements, str): - try: - _LOGGER.info(f"Reading requirements from {requirements=}") - with open(requirements) as f: - requirements = f.read().splitlines() - _LOGGER.info(f"Read the following lines: {requirements}") - except IOError as err: - raise IOError( - f"Failed to read requirements from {requirements=}" - ) from err - requirements = requirements or [] - extra_packages = extra_packages or [] - for extra_package in extra_packages: - if not os.path.exists(extra_package): - raise FileNotFoundError( - f"Extra package specified but not found: {extra_package=}" - ) + _validate_staging_bucket_or_raise(staging_bucket) # Prepares the Reasoning Engine for creation in Vertex AI. # This involves packaging and uploading the artifacts for # reasoning_engine, requirements and extra_packages to @@ -257,6 +218,7 @@ def create( gcs_dir_name=gcs_dir_name, extra_packages=extra_packages, ) + # Update the package spec. 
package_spec = types.ReasoningEngineSpec.PackageSpec( python_version=sys_version, pickle_object_gcs_uri="{}/{}/{}".format( @@ -284,11 +246,6 @@ def create( reasoning_engine.query, schema_name=f"{type(reasoning_engine).__name__}_query", ) - # Note: we append the schema post-initialization to avoid upstream - # issues in marshaling the data that would result in errors like: - # ../../../../../proto/marshal/rules/struct.py:140: in to_proto - # self._marshal.to_proto(struct_pb2.Value, v) for k, v in value.items() - # E AttributeError: 'list' object has no attribute 'items' reasoning_engine_spec.class_methods.append(_utils.to_proto(schema_dict)) except Exception as e: _LOGGER.warning(f"failed to generate schema: {e}") @@ -324,11 +281,133 @@ def create( sdk_resource._operation_schemas = None return sdk_resource + def update( + self, + *, + reasoning_engine: Optional[Queryable] = None, + requirements: Optional[Union[str, Sequence[str]]] = None, + display_name: Optional[str] = None, + description: Optional[str] = None, + gcs_dir_name: str = _DEFAULT_GCS_DIR_NAME, + sys_version: Optional[str] = None, + extra_packages: Optional[Sequence[str]] = None, + ) -> "ReasoningEngine": + """Updates an existing ReasoningEngine. + + This method updates the configuration of an existing ReasoningEngine + running remotely, which is identified by its resource name. + Unlike the `create` function which requires a `reasoning_engine` object, + all arguments in this method are optional. + This method allows you to modify individual aspects of the configuration + by providing any of the optional arguments. + Note that you must provide at least one argument (except `sys_version`). + + Args: + reasoning_engine (ReasoningEngineInterface): + Optional. The Reasoning Engine to be replaced. + requirements (Union[str, Sequence[str]]): + Optional. The set of PyPI dependencies needed. 
It can either be + the path to a single file (requirements.txt), or an ordered list + of strings corresponding to each line of the requirements file. + display_name (str): + Optional. The user-defined name of the Reasoning Engine. + The name can be up to 128 characters long and can comprise any + UTF-8 character. + description (str): + Optional. The description of the Reasoning Engine. + gcs_dir_name (str): + Optional. The GCS bucket directory under `staging_bucket` to + use for staging the artifacts needed. + sys_version (str): + Optional. The Python system version used. Currently updating + sys version is not supported. + extra_packages (Sequence[str]): + Optional. The set of extra user-provided packages (if any). + + Returns: + ReasoningEngine: The Reasoning Engine that was updated. + + Raises: + ValueError: If `sys_version` is updated. + ValueError: If the `staging_bucket` was not set using vertexai.init. + ValueError: If the `staging_bucket` does not start with "gs://". + FileNotFoundError: If `extra_packages` includes a file or directory + that does not exist. + ValueError: if none of `display_name`, `description`, + `requirements`, `extra_packages`, or `reasoning_engine` were + specified. + IOError: If requirements is a string that corresponds to a + nonexistent file. + """ + staging_bucket = initializer.global_config.staging_bucket + _validate_staging_bucket_or_raise(staging_bucket) + + # Validate the arguments. + if not any( + [ + reasoning_engine, + requirements, + extra_packages, + display_name, + description, + ] + ): + raise ValueError( + "At least one of `reasoning_engine`, `requirements`, " + "`extra_packages`, `display_name`, or `description` must be " + "specified."
+ ) + if sys_version: + _LOGGER.warning("Updated sys_version is not supported.") + if requirements: + requirements = _validate_requirements_or_raise(requirements) + if extra_packages: + extra_packages = _validate_extra_packages_or_raise(extra_packages) + if reasoning_engine: + reasoning_engine = _validate_reasoning_engine_or_raise(reasoning_engine) + + # Prepares the Reasoning Engine for creation in Vertex AI. + # This involves packaging and uploading the artifacts for + # reasoning_engine, requirements and extra_packages to + # `staging_bucket/gcs_dir_name`. + _prepare( + reasoning_engine=reasoning_engine, + requirements=requirements, + project=self.project, + location=self.location, + staging_bucket=staging_bucket, + gcs_dir_name=gcs_dir_name, + extra_packages=extra_packages, + ) + update_request = _generate_update_request_or_raise( + resource_name=self.resource_name, + staging_bucket=staging_bucket, + gcs_dir_name=gcs_dir_name, + reasoning_engine=reasoning_engine, + requirements=requirements, + extra_packages=extra_packages, + display_name=display_name, + description=description, + ) + operation_future = self.api_client.update_reasoning_engine( + request=update_request + ) + _LOGGER.log_create_with_lro(ReasoningEngine, operation_future) + created_resource = operation_future.result() + _LOGGER.log_create_complete( + ReasoningEngine, + created_resource, + self._resource_noun, + module_name="vertexai.preview.reasoning_engines", + ) + self._operation_schemas = None + return self + def operation_schemas(self) -> Sequence[_utils.JsonDict]: """Returns the (Open)API schemas for the Reasoning Engine.""" spec = _utils.to_dict(self._gca_resource.spec) if self._operation_schemas is None: - self._operation_schemas = spec.get("classMethods", []) + self._operation_schemas = spec.get("class_methods", []) return self._operation_schemas def query(self, **kwargs) -> _utils.JsonDict: @@ -356,31 +435,75 @@ def query(self, **kwargs) -> _utils.JsonDict: return output -def _prepare( - 
reasoning_engine: Queryable, - requirements: Sequence[str], - project: str, - location: str, - staging_bucket: str, - gcs_dir_name: str, - extra_packages: Sequence[str], -) -> None: - """Prepares the reasoning engine for creation in Vertex AI. +def _validate_sys_version_or_raise(sys_version: str) -> None: + """Tries to validate the python system version.""" + if sys_version not in _SUPPORTED_PYTHON_VERSIONS: + raise ValueError( + f"Unsupported python version: {sys_version}. ReasoningEngine " + f"only supports {_SUPPORTED_PYTHON_VERSIONS} at the moment." + ) + if sys_version != f"{sys.version_info.major}.{sys.version_info.minor}": + _LOGGER.warning( + f"{sys_version=} is inconsistent with {sys.version_info=}. " + "This might result in issues with deployment, and should only " + "be used as a workaround for advanced cases." + ) - This involves packaging and uploading the artifacts to Cloud Storage. - Args: - reasoning_engine: The reasoning engine to be prepared. - requirements (Sequence[str]): The set of PyPI dependencies needed. - project (str): The project for the staging bucket. - location (str): The location for the staging bucket. - staging_bucket (str): The staging bucket name in the form "gs://...". - gcs_dir_name (str): The GCS bucket directory under `staging_bucket` to - use for staging the artifacts needed. - extra_packages (Sequence[str]): The set of extra user-provided packages. 
- """ +def _validate_staging_bucket_or_raise(staging_bucket: str) -> str: + """Tries to validate the staging bucket.""" + if not staging_bucket: + raise ValueError("Please provide a `staging_bucket` in `vertexai.init(...)`") + if not staging_bucket.startswith("gs://"): + raise ValueError(f"{staging_bucket=} must start with `gs://`") + + +def _validate_reasoning_engine_or_raise(reasoning_engine: Queryable) -> Queryable: + """Tries to validate the reasoning engine.""" + if not (hasattr(reasoning_engine, "query") and callable(reasoning_engine.query)): + raise TypeError( + "reasoning_engine does not have a callable method named `query`" + ) + try: + inspect.signature(reasoning_engine.query) + except ValueError as err: + raise ValueError( + "Invalid query signature. This might be due to a missing " + "`self` argument in the reasoning_engine.query method." + ) from err + if isinstance(reasoning_engine, Cloneable): + # Avoid undeployable ReasoningChain states. + reasoning_engine = reasoning_engine.clone() + return reasoning_engine + + +def _validate_requirements_or_raise(requirements: Sequence[str]) -> Sequence[str]: + """Tries to validate the requirements.""" + if isinstance(requirements, str): + try: + _LOGGER.info(f"Reading requirements from {requirements=}") + with open(requirements) as f: + requirements = f.read().splitlines() + _LOGGER.info(f"Read the following lines: {requirements}") + except IOError as err: + raise IOError(f"Failed to read requirements from {requirements=}") from err + return requirements or [] + + +def _validate_extra_packages_or_raise(extra_packages: Sequence[str]) -> Sequence[str]: + """Tries to validate the extra packages.""" + extra_packages = extra_packages or [] + for extra_package in extra_packages: + if not os.path.exists(extra_package): + raise FileNotFoundError( + f"Extra package specified but not found: {extra_package=}" + ) + return extra_packages + + +def _get_gcs_bucket(project: str, location: str, staging_bucket: str) ->
storage.Bucket: + """Gets or creates the GCS bucket.""" storage = _utils._import_cloud_storage_or_raise() - cloudpickle = _utils._import_cloudpickle_or_raise() storage_client = storage.Client(project=project) staging_bucket = staging_bucket.replace("gs://", "") try: @@ -390,18 +513,41 @@ def _prepare( new_bucket = storage_client.bucket(staging_bucket) gcs_bucket = storage_client.create_bucket(new_bucket, location=location) _LOGGER.info(f"Creating bucket {staging_bucket} in {location=}") + return gcs_bucket + +def _upload_reasoning_engine( + reasoning_engine: Queryable, + gcs_bucket: storage.Bucket, + gcs_dir_name: str, +) -> None: + """Uploads the reasoning engine to GCS.""" + cloudpickle = _utils._import_cloudpickle_or_raise() blob = gcs_bucket.blob(f"{gcs_dir_name}/{_BLOB_FILENAME}") with blob.open("wb") as f: cloudpickle.dump(reasoning_engine, f) - dir_name = f"gs://{staging_bucket}/{gcs_dir_name}" + dir_name = f"gs://{gcs_bucket.name}/{gcs_dir_name}" _LOGGER.info(f"Writing to {dir_name}/{_BLOB_FILENAME}") + +def _upload_requirements( + requirements: Sequence[str], + gcs_bucket: storage.Bucket, + gcs_dir_name: str, +) -> None: + """Uploads the requirements file to GCS.""" blob = gcs_bucket.blob(f"{gcs_dir_name}/{_REQUIREMENTS_FILE}") - if requirements: - blob.upload_from_string("\n".join(requirements)) - _LOGGER.info(f"Writing to {dir_name}/{_REQUIREMENTS_FILE}") + blob.upload_from_string("\n".join(requirements)) + dir_name = f"gs://{gcs_bucket.name}/{gcs_dir_name}" + _LOGGER.info(f"Writing to {dir_name}/{_REQUIREMENTS_FILE}") + +def _upload_extra_packages( + extra_packages: Sequence[str], + gcs_bucket: storage.Bucket, + gcs_dir_name: str, +) -> None: + """Uploads extra packages to GCS.""" _LOGGER.info("Creating in-memory tarfile of extra_packages") tar_fileobj = io.BytesIO() with tarfile.open(fileobj=tar_fileobj, mode="w|gz") as tar: @@ -410,4 +556,108 @@ def _prepare( tar_fileobj.seek(0) blob = gcs_bucket.blob(f"{gcs_dir_name}/{_EXTRA_PACKAGES_FILE}") 
blob.upload_from_string(tar_fileobj.read()) + dir_name = f"gs://{gcs_bucket.name}/{gcs_dir_name}" _LOGGER.info(f"Writing to {dir_name}/{_EXTRA_PACKAGES_FILE}") + + +def _prepare( + reasoning_engine: Queryable, + requirements: Sequence[str], + extra_packages: Sequence[str], + project: str, + location: str, + staging_bucket: str, + gcs_dir_name: str, +) -> None: + """Prepares the reasoning engine for creation in Vertex AI. + + This involves packaging and uploading the artifacts to Cloud Storage. + + Args: + reasoning_engine: The reasoning engine to be prepared. + requirements (Sequence[str]): The set of PyPI dependencies needed. + extra_packages (Sequence[str]): The set of extra user-provided packages. + project (str): The project for the staging bucket. + location (str): The location for the staging bucket. + staging_bucket (str): The staging bucket name in the form "gs://...". + gcs_dir_name (str): The GCS bucket directory under `staging_bucket` to + use for staging the artifacts needed. 
+ """ + gcs_bucket = _get_gcs_bucket(project, location, staging_bucket) + if reasoning_engine: + _upload_reasoning_engine(reasoning_engine, gcs_bucket, gcs_dir_name) + if requirements: + _upload_requirements(requirements, gcs_bucket, gcs_dir_name) + if extra_packages: + _upload_extra_packages(extra_packages, gcs_bucket, gcs_dir_name) + + +def _generate_update_request_or_raise( + resource_name: str, + staging_bucket: str, + gcs_dir_name: str = _DEFAULT_GCS_DIR_NAME, + reasoning_engine: Optional[Queryable] = None, + requirements: Optional[Union[str, Sequence[str]]] = None, + extra_packages: Optional[Sequence[str]] = None, + display_name: Optional[str] = None, + description: Optional[str] = None, +) -> reasoning_engine_service.UpdateReasoningEngineRequest: + """Tries to generates the update request for the reasoning engine.""" + is_spec_update = False + update_masks: List[str] = [] + reasoning_engine_spec = types.ReasoningEngineSpec() + package_spec = types.ReasoningEngineSpec.PackageSpec() + if requirements: + is_spec_update = True + update_masks.append("spec.package_spec.requirements_gcs_uri") + package_spec.requirements_gcs_uri = "{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _REQUIREMENTS_FILE, + ) + if extra_packages: + is_spec_update = True + update_masks.append("spec.package_spec.dependency_files_gcs_uri") + package_spec.dependency_files_gcs_uri = "{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _EXTRA_PACKAGES_FILE, + ) + if reasoning_engine: + is_spec_update = True + update_masks.append("spec.package_spec.pickle_object_gcs_uri") + package_spec.pickle_object_gcs_uri = "{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _BLOB_FILENAME, + ) + try: + schema_dict = _utils.generate_schema( + reasoning_engine.query, + schema_name=f"{type(reasoning_engine).__name__}_query", + ) + reasoning_engine_spec.class_methods.append(_utils.to_proto(schema_dict)) + except Exception as e: + _LOGGER.warning(f"failed to generate schema: {e}") + 
update_masks.append("spec.class_methods") + + reasoning_engine_message = types.ReasoningEngine(name=resource_name) + if is_spec_update: + reasoning_engine_spec.package_spec = package_spec + reasoning_engine_message.spec = reasoning_engine_spec + if display_name: + reasoning_engine_message.display_name = display_name + update_masks.append("display_name") + if description: + reasoning_engine_message.description = description + update_masks.append("description") + if not update_masks: + raise ValueError( + "At least one of `reasoning_engine`, `requirements`, " + "`extra_packages`, `display_name`, or `description` must be " + "specified." + ) + return reasoning_engine_service.UpdateReasoningEngineRequest( + reasoning_engine=reasoning_engine_message, + update_mask=field_mask_pb2.FieldMask(paths=update_masks), + ) diff --git a/vertexai/vision_models/__init__.py b/vertexai/vision_models/__init__.py index 77033f329c..1834b5cebb 100644 --- a/vertexai/vision_models/__init__.py +++ b/vertexai/vision_models/__init__.py @@ -15,8 +15,11 @@ """Classes for working with vision models.""" from vertexai.vision_models._vision_models import ( + GeneratedImage, Image, ImageCaptioningModel, + ImageGenerationModel, + ImageGenerationResponse, ImageQnAModel, ImageTextModel, MultiModalEmbeddingModel, @@ -27,8 +30,11 @@ ) __all__ = [ + "GeneratedImage", "Image", "ImageCaptioningModel", + "ImageGenerationModel", + "ImageGenerationResponse", "ImageQnAModel", "ImageTextModel", "MultiModalEmbeddingModel",