From e108ef8250c77c8a8edeccb6b601cbe0b0380c89 Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Mon, 6 Dec 2021 16:14:09 -0800 Subject: [PATCH 01/12] fix: incorrect uri for IOD yaml (#889) --- .../prediction_service/predict_image_object_detection_sample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/prediction_service/predict_image_object_detection_sample.py b/samples/snippets/prediction_service/predict_image_object_detection_sample.py index b3bccc2921..1975b06a33 100644 --- a/samples/snippets/prediction_service/predict_image_object_detection_sample.py +++ b/samples/snippets/prediction_service/predict_image_object_detection_sample.py @@ -52,7 +52,7 @@ def predict_image_object_detection_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection_1.0.0.yaml for the format of the predictions. predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) From 578e06df481c3d60074a7b8e9365f8361b04e32b Mon Sep 17 00:00:00 2001 From: Vinny Senthil Date: Tue, 7 Dec 2021 10:34:49 -0800 Subject: [PATCH 02/12] fix: Minor docstring and snippet fixes (#873) * docs: Update batch prediction job instance, predict format docstrings * Update AutoML predict snippets to refer to correct YAML Co-authored-by: Dan Lee <71398022+dandhlee@users.noreply.github.com> --- .sample_configs/process_configs.yaml | 15 ++++++--------- google/cloud/aiplatform/jobs.py | 13 +++++++------ google/cloud/aiplatform/models.py | 13 +++++++------ .../predict_image_classification_sample.py | 2 +- .../predict_tabular_classification_sample.py | 2 +- .../predict_tabular_regression_sample.py | 2 +- ...ict_text_classification_single_label_sample.py | 2 +- .../predict_text_entity_extraction_sample.py | 2 +- .../predict_text_sentiment_analysis_sample.py | 2 +- 9 files changed, 26 insertions(+), 27 deletions(-) diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml index 4e6608b4fd..91c8b61c8f 100644 --- a/.sample_configs/process_configs.yaml +++ b/.sample_configs/process_configs.yaml @@ -266,7 +266,7 @@ predict_image_classification_sample: instance_dict: predict.instance.ImageClassificationPredictionInstance parameters_dict: predict.params.ImageClassificationPredictionParams comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_classification_1.0.0.yaml for the format of the predictions. predict_image_file_sample: max_depth: 1 @@ -278,7 +278,7 @@ predict_image_object_detection_sample: instance_dict: predict.instance.ImageObjectDetectionPredictionInstance parameters_dict: predict.params.ImageObjectDetectionPredictionParams comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection_1.0.0.yaml for the format of the predictions. 
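# For reference, a prediction under image_object_detection_1.0.0.yaml is
# assumed (for illustration; the schema file is authoritative) to carry
# parallel ids, displayNames, confidences, and bboxes arrays, with each
# bbox given as [xMin, xMax, yMin, yMax] in normalized coordinates.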
predict_sample: max_depth: 1 @@ -290,30 +290,27 @@ predict_tabular_classification_sample: max_depth: 1 resource_name: endpoint comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_classification.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tabular_classification_1.0.0.yaml for the format of the predictions. predict_tabular_forecasting_sample: {} predict_tabular_regression_sample: max_depth: 1 resource_name: endpoint comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_regression.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tabular_regression_1.0.0.yaml for the format of the predictions. predict_text_classification_single_label_sample: max_depth: 1 resource_name: endpoint schema_types: instance_dict: predict.instance.TextClassificationPredictionInstance - comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_classification.yaml - for the format of the predictions. predict_text_entity_extraction_sample: max_depth: 1 resource_name: endpoint schema_types: instance_dict: predict.instance.TextExtractionPredictionInstance comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction_1.0.0.yaml for the format of the predictions. predict_text_sentiment_analysis_sample: max_depth: 1 @@ -321,7 +318,7 @@ predict_text_sentiment_analysis_sample: schema_types: instance_dict: predict.instance.TextSentimentPredictionInstance comments: - predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment.yaml + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment_1.0.0.yaml for the format of the predictions. search_migratable_resources_sample: {} undeploy_model_sample: diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 57958fc779..ae920ab2f3 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -380,13 +380,14 @@ def create( Or an instance of aiplatform.Model. instances_format (str): - Required. The format in which instances are given, must be one - of "jsonl", "csv", "bigquery", "tf-record", "tf-record-gzip", - or "file-list". Default is "jsonl" when using `gcs_source`. If a - `bigquery_source` is provided, this is overridden to "bigquery". + Required. The format in which instances are provided. Must be one + of the formats listed in `Model.supported_input_storage_formats`. + Default is "jsonl" when using `gcs_source`. If a `bigquery_source` + is provided, this is overridden to "bigquery". predictions_format (str): - Required. The format in which Vertex AI gives the - predictions, must be one of "jsonl", "csv", or "bigquery". + Required. The format in which Vertex AI outputs the + predictions, must be one of the formats specified in + `Model.supported_output_storage_formats`. Default is "jsonl" when using `gcs_destination_prefix`. If a `bigquery_destination_prefix` is provided, this is overridden to "bigquery". diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 87af6b16bf..17ddc5c70d 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -2099,10 +2099,10 @@ def batch_predict( BigQuery URI to a table, up to 2000 characters long. 
For example: `bq://projectId.bqDatasetId.bqTableId` instances_format: str = "jsonl" - Required. The format in which instances are given, must be one - of "jsonl", "csv", "bigquery", "tf-record", "tf-record-gzip", - or "file-list". Default is "jsonl" when using `gcs_source`. If a - `bigquery_source` is provided, this is overridden to "bigquery". + The format in which instances are provided. Must be one + of the formats listed in `Model.supported_input_storage_formats`. + Default is "jsonl" when using `gcs_source`. If a `bigquery_source` + is provided, this is overridden to "bigquery". gcs_destination_prefix: Optional[str] = None The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new @@ -2145,8 +2145,9 @@ def batch_predict( ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. predictions_format: str = "jsonl" - Required. The format in which Vertex AI gives the - predictions, must be one of "jsonl", "csv", or "bigquery". + Required. The format in which Vertex AI outputs the + predictions, must be one of the formats specified in + `Model.supported_output_storage_formats`. Default is "jsonl" when using `gcs_destination_prefix`. If a `bigquery_destination_prefix` is provided, this is overridden to "bigquery". diff --git a/samples/snippets/prediction_service/predict_image_classification_sample.py b/samples/snippets/prediction_service/predict_image_classification_sample.py index 0e0253d341..48a88bd256 100644 --- a/samples/snippets/prediction_service/predict_image_classification_sample.py +++ b/samples/snippets/prediction_service/predict_image_classification_sample.py @@ -52,7 +52,7 @@ def predict_image_classification_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/image_classification_1.0.0.yaml for the format of the predictions. predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) diff --git a/samples/snippets/prediction_service/predict_tabular_classification_sample.py b/samples/snippets/prediction_service/predict_tabular_classification_sample.py index a9df7678a7..7de71ab38d 100644 --- a/samples/snippets/prediction_service/predict_tabular_classification_sample.py +++ b/samples/snippets/prediction_service/predict_tabular_classification_sample.py @@ -46,7 +46,7 @@ def predict_tabular_classification_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/tables_classification.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/tabular_classification_1.0.0.yaml for the format of the predictions. 
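    # For reference, a tabular classification prediction under that schema is
    # assumed to look like {"classes": ["0", "1"], "scores": [0.18, 0.82]},
    # i.e. one confidence score per class label (illustrative values only).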
predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) diff --git a/samples/snippets/prediction_service/predict_tabular_regression_sample.py b/samples/snippets/prediction_service/predict_tabular_regression_sample.py index 7f76dc6f17..2cd970c4f7 100644 --- a/samples/snippets/prediction_service/predict_tabular_regression_sample.py +++ b/samples/snippets/prediction_service/predict_tabular_regression_sample.py @@ -46,7 +46,7 @@ def predict_tabular_regression_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/tables_regression.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/tabular_regression_1.0.0.yaml for the format of the predictions. predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) diff --git a/samples/snippets/prediction_service/predict_text_classification_single_label_sample.py b/samples/snippets/prediction_service/predict_text_classification_single_label_sample.py index c0579224ef..38e72f11e6 100644 --- a/samples/snippets/prediction_service/predict_text_classification_single_label_sample.py +++ b/samples/snippets/prediction_service/predict_text_classification_single_label_sample.py @@ -45,7 +45,7 @@ def predict_text_classification_single_label_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/text_classification.yaml for the format of the predictions. + predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) diff --git a/samples/snippets/prediction_service/predict_text_entity_extraction_sample.py b/samples/snippets/prediction_service/predict_text_entity_extraction_sample.py index be70a3c075..97b354b874 100644 --- a/samples/snippets/prediction_service/predict_text_entity_extraction_sample.py +++ b/samples/snippets/prediction_service/predict_text_entity_extraction_sample.py @@ -46,7 +46,7 @@ def predict_text_entity_extraction_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction_1.0.0.yaml for the format of the predictions. predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) diff --git a/samples/snippets/prediction_service/predict_text_sentiment_analysis_sample.py b/samples/snippets/prediction_service/predict_text_sentiment_analysis_sample.py index 934bc5da9e..f8e66afaa2 100644 --- a/samples/snippets/prediction_service/predict_text_sentiment_analysis_sample.py +++ b/samples/snippets/prediction_service/predict_text_sentiment_analysis_sample.py @@ -45,7 +45,7 @@ def predict_text_sentiment_analysis_sample( ) print("response") print(" deployed_model_id:", response.deployed_model_id) - # See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment.yaml for the format of the predictions. + # See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment_1.0.0.yaml for the format of the predictions. 
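    # For reference, a text sentiment prediction under that schema is assumed
    # to be of the form {"sentiment": 2}: a single integer from 0 up to the
    # sentimentMax defined for the dataset (an assumption for illustration).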
predictions = response.predictions for prediction in predictions: print(" prediction:", dict(prediction)) From 67fa1f179af66686339d797e5b368e96816ed1c5 Mon Sep 17 00:00:00 2001 From: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> Date: Tue, 7 Dec 2021 14:24:15 -0500 Subject: [PATCH 03/12] docs: Update references to containers and notebook samples. (#890) fixes #838 #678 --- README.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index a59857a13b..ca95583841 100644 --- a/README.rst +++ b/README.rst @@ -80,7 +80,7 @@ Overview ~~~~~~~~ This section provides a brief overview of the Vertex SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples. -.. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/ai-platform-samples/tree/master/ai-platform-unified/notebooks/unofficial/sdk +.. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/main/notebooks/community/sdk Importing ^^^^^^^^^ @@ -201,9 +201,9 @@ It must write the model artifact to the environment variable populated by the tr job = aiplatform.CustomTrainingJob( display_name="my-training-job", script_path="training_script.py", - container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest", + container_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-2:latest", requirements=["gcsfs==0.7.1"], - model_serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest", + model_serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest", ) model = job.run(my_dataset, @@ -266,7 +266,7 @@ To upload a model: model = aiplatform.Model.upload( display_name='my-model', artifact_uri="gs://python/to/my/model/dir", - serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest", + serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest", ) To get a model: From fda942ffbe009077b47f36aad1c29603a451e38b Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Tue, 7 Dec 2021 12:58:53 -0800 Subject: [PATCH 04/12] fix: add param for multi-label per user's feedback (#887) * fix: add param for multi-label per user's feedback * fix: indentation * test: update assert for new params * lint: remove trailing whitespace --- ...create_training_pipeline_image_classification_sample.py | 7 ++++++- ...e_training_pipeline_image_classification_sample_test.py | 4 +++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/samples/model-builder/create_training_pipeline_image_classification_sample.py b/samples/model-builder/create_training_pipeline_image_classification_sample.py index 3786894a05..615e468485 100644 --- a/samples/model-builder/create_training_pipeline_image_classification_sample.py +++ b/samples/model-builder/create_training_pipeline_image_classification_sample.py @@ -24,6 +24,7 @@ def create_training_pipeline_image_classification_sample( display_name: str, dataset_id: int, model_display_name: Optional[str] = None, + multi_label: bool = False, training_fraction_split: float = 0.8, validation_fraction_split: float = 0.1, test_fraction_split: float = 0.1, @@ -33,7 +34,11 @@ def create_training_pipeline_image_classification_sample( ): aiplatform.init(project=project, location=location) - job = aiplatform.AutoMLImageTrainingJob(display_name=display_name) + job = aiplatform.AutoMLImageTrainingJob( + display_name=display_name, + prediction_type='classification', + multi_label=multi_label + ) my_image_ds = 
aiplatform.ImageDataset(dataset_id) diff --git a/samples/model-builder/create_training_pipeline_image_classification_sample_test.py b/samples/model-builder/create_training_pipeline_image_classification_sample_test.py index 1c7080e7a1..c5d7e14beb 100644 --- a/samples/model-builder/create_training_pipeline_image_classification_sample_test.py +++ b/samples/model-builder/create_training_pipeline_image_classification_sample_test.py @@ -44,7 +44,9 @@ def test_create_training_pipeline_image_classification_sample( project=constants.PROJECT, location=constants.LOCATION ) mock_get_automl_image_training_job.assert_called_once_with( - display_name=constants.DISPLAY_NAME + display_name=constants.DISPLAY_NAME, + multi_label=False, + prediction_type='classification' ) mock_run_automl_image_training_job.assert_called_once_with( dataset=mock_image_dataset, From 37ee0a1dc6e0105e19aca18f44995a352bfc40cb Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Tue, 7 Dec 2021 15:59:07 -0800 Subject: [PATCH 05/12] fix: add clarity to parameters per user feedback (#886) * fix: add clarity per user feedback * fix: review * fix: review * fix: TW review * lint: fix wsp * fix: param name * fix: code review Co-authored-by: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com> --- .../predict_tabular_classification_sample.py | 15 +++++++++++++-- .../predict_tabular_classification_sample_test.py | 2 +- .../predict_tabular_regression_sample.py | 7 +++++-- .../predict_tabular_regression_sample_test.py | 2 +- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/samples/model-builder/predict_tabular_classification_sample.py b/samples/model-builder/predict_tabular_classification_sample.py index e5b1a0283d..0a0a1da8bd 100644 --- a/samples/model-builder/predict_tabular_classification_sample.py +++ b/samples/model-builder/predict_tabular_classification_sample.py @@ -20,11 +20,22 @@ # [START aiplatform_sdk_predict_tabular_classification_sample] def predict_tabular_classification_sample( - project: str, location: str, endpoint: str, instances: List[Dict], + project: str, + location: str, + endpoint_name: str, + instances: List[Dict], ): + ''' + Args + project: Your project ID or project number. + location: Region where Endpoint is located. For example, 'us-central1'. + endpoint_name: A fully qualified endpoint name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or + "456" when project and location are initialized or passed. + instances: A list of one or more instances (examples) to return a prediction for. 
+ ''' aiplatform.init(project=project, location=location) - endpoint = aiplatform.Endpoint(endpoint) + endpoint = aiplatform.Endpoint(endpoint_name) response = endpoint.predict(instances=instances) diff --git a/samples/model-builder/predict_tabular_classification_sample_test.py b/samples/model-builder/predict_tabular_classification_sample_test.py index 49a701115b..66f2976803 100644 --- a/samples/model-builder/predict_tabular_classification_sample_test.py +++ b/samples/model-builder/predict_tabular_classification_sample_test.py @@ -22,7 +22,7 @@ def test_predict_tabular_classification_sample(mock_sdk_init, mock_get_endpoint) predict_tabular_classification_sample.predict_tabular_classification_sample( project=constants.PROJECT, location=constants.LOCATION, - endpoint=constants.ENDPOINT_NAME, + endpoint_name=constants.ENDPOINT_NAME, instances=constants.PREDICTION_TABULAR_CLASSIFICATION_INSTANCE, ) diff --git a/samples/model-builder/predict_tabular_regression_sample.py b/samples/model-builder/predict_tabular_regression_sample.py index fee4d34e38..b7bf575d44 100644 --- a/samples/model-builder/predict_tabular_regression_sample.py +++ b/samples/model-builder/predict_tabular_regression_sample.py @@ -19,11 +19,14 @@ # [START aiplatform_sdk_predict_tabular_regression_sample] def predict_tabular_regression_sample( - project: str, location: str, endpoint: str, instances: List[Dict], + project: str, + location: str, + endpoint_name: str, + instances: List[Dict], ): aiplatform.init(project=project, location=location) - endpoint = aiplatform.Endpoint(endpoint) + endpoint = aiplatform.Endpoint(endpoint_name) response = endpoint.predict(instances=instances) diff --git a/samples/model-builder/predict_tabular_regression_sample_test.py b/samples/model-builder/predict_tabular_regression_sample_test.py index 7491d7c1d5..abda65b3c4 100644 --- a/samples/model-builder/predict_tabular_regression_sample_test.py +++ b/samples/model-builder/predict_tabular_regression_sample_test.py @@ -22,7 +22,7 @@ def test_predict_tabular_regression_sample(mock_sdk_init, mock_get_endpoint): predict_tabular_regression_sample.predict_tabular_regression_sample( project=constants.PROJECT, location=constants.LOCATION, - endpoint=constants.ENDPOINT_NAME, + endpoint_name=constants.ENDPOINT_NAME, instances=constants.PREDICTION_TABULAR_REGRESSOIN_INSTANCE, ) From a06da6d26cedb34a830f52a53d6d2afe7fc90c6f Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Wed, 8 Dec 2021 10:33:58 -0800 Subject: [PATCH 06/12] samples: add model type for create_training_pipeline_image_classification_sample (#891) --- .../create_training_pipeline_image_classification_sample.py | 2 ++ ...create_training_pipeline_image_classification_sample_test.py | 1 + samples/model-builder/test_constants.py | 1 + 3 files changed, 4 insertions(+) diff --git a/samples/model-builder/create_training_pipeline_image_classification_sample.py b/samples/model-builder/create_training_pipeline_image_classification_sample.py index 615e468485..417cfa43b0 100644 --- a/samples/model-builder/create_training_pipeline_image_classification_sample.py +++ b/samples/model-builder/create_training_pipeline_image_classification_sample.py @@ -24,6 +24,7 @@ def create_training_pipeline_image_classification_sample( display_name: str, dataset_id: int, model_display_name: Optional[str] = None, + model_type: str = "CLOUD", multi_label: bool = False, training_fraction_split: float = 0.8, validation_fraction_split: float = 0.1, @@ -36,6 +37,7 @@ def create_training_pipeline_image_classification_sample( job = 
aiplatform.AutoMLImageTrainingJob( display_name=display_name, + model_type=model_type, prediction_type='classification', multi_label=multi_label ) diff --git a/samples/model-builder/create_training_pipeline_image_classification_sample_test.py b/samples/model-builder/create_training_pipeline_image_classification_sample_test.py index c5d7e14beb..4f02441b4d 100644 --- a/samples/model-builder/create_training_pipeline_image_classification_sample_test.py +++ b/samples/model-builder/create_training_pipeline_image_classification_sample_test.py @@ -45,6 +45,7 @@ def test_create_training_pipeline_image_classification_sample( ) mock_get_automl_image_training_job.assert_called_once_with( display_name=constants.DISPLAY_NAME, + model_type=constants.MODEL_TYPE, multi_label=False, prediction_type='classification' ) diff --git a/samples/model-builder/test_constants.py b/samples/model-builder/test_constants.py index aa92434b95..0e4e0f5273 100644 --- a/samples/model-builder/test_constants.py +++ b/samples/model-builder/test_constants.py @@ -196,3 +196,4 @@ "gs://bucket3/custom-training-python-package/my_app/trainer-0.1.tar.gz" ) PYTHON_MODULE_NAME = "trainer.task" +MODEL_TYPE = "CLOUD" From f9aecd22fe08a97e45187b4d11c755ac3b9dfadd Mon Sep 17 00:00:00 2001 From: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com> Date: Fri, 10 Dec 2021 12:31:20 -0600 Subject: [PATCH 07/12] docs: Updated docstrings with exception error classes (#894) Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> --- google/cloud/aiplatform/base.py | 8 ++++---- google/cloud/aiplatform/datasets/_datasources.py | 4 ++-- google/cloud/aiplatform/datasets/dataset.py | 2 +- .../metadata/tf/v1/saved_model_metadata_builder.py | 2 +- .../metadata/tf/v2/saved_model_metadata_builder.py | 6 +++--- google/cloud/aiplatform/jobs.py | 4 ++-- google/cloud/aiplatform/metadata/metadata.py | 6 +++--- google/cloud/aiplatform/metadata/resource.py | 2 +- google/cloud/aiplatform/models.py | 6 +++--- google/cloud/aiplatform/training_jobs.py | 4 ++-- google/cloud/aiplatform/utils/featurestore_utils.py | 4 ++-- google/cloud/aiplatform/utils/tensorboard_utils.py | 2 +- 12 files changed, 25 insertions(+), 25 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 2ee4bf4635..d572913a25 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -504,7 +504,7 @@ def _get_and_validate_project_location( location(str): The location of the resource noun. Raises: - RuntimeError if location is different from resource location + RuntimeError: If location is different from resource location """ fields = utils.extract_fields_from_resource_name( @@ -604,7 +604,7 @@ def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when property is not accessible. Raises: - RuntimeError if _gca_resource is has not been created. + RuntimeError: If _gca_resource is has not been created. """ if self._gca_resource is None: raise RuntimeError( @@ -1115,7 +1115,7 @@ def _wait_for_resource_creation(self) -> None: job.run(sync=False, ...) job._wait_for_resource_creation() Raises: - RuntimeError if the resource has not been scheduled to be created. + RuntimeError: If the resource has not been scheduled to be created. """ # If the user calls this but didn't actually invoke an API to create @@ -1141,7 +1141,7 @@ def _assert_gca_resource_is_available(self) -> None: resource creation has failed asynchronously. Raises: - RuntimeError when resource has not been created. 
+ RuntimeError: When resource has not been created. """ if not getattr(self._gca_resource, "name", None): raise RuntimeError( diff --git a/google/cloud/aiplatform/datasets/_datasources.py b/google/cloud/aiplatform/datasets/_datasources.py index 9323f40382..2ca2c02bfd 100644 --- a/google/cloud/aiplatform/datasets/_datasources.py +++ b/google/cloud/aiplatform/datasets/_datasources.py @@ -71,7 +71,7 @@ def __init__( "bq://project.dataset.table_name" Raises: - ValueError if source configuration is not valid. + ValueError: If source configuration is not valid. """ dataset_metadata = None @@ -215,7 +215,7 @@ def create_datasource( datasource (Datasource) Raises: - ValueError when below scenarios happen + ValueError: When below scenarios happen: - import_schema_uri is identified for creating TabularDatasource - either import_schema_uri or gcs_source is missing for creating NonTabularDatasourceImportable """ diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 5e5de0058b..cdb769a8b2 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -91,7 +91,7 @@ def _validate_metadata_schema_uri(self) -> None: """Validate the metadata_schema_uri of retrieved dataset resource. Raises: - ValueError if the dataset type of the retrieved dataset resource is + ValueError: If the dataset type of the retrieved dataset resource is not supported by the class. """ if self._supported_metadata_schema_uris and ( diff --git a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py index 6f0af6d93b..b7ffed4802 100644 --- a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py +++ b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py @@ -50,7 +50,7 @@ def __init__( signature_name) specifies multiple outputs. Raises: - ValueError if outputs_to_explain contains more than 1 element or + ValueError: If outputs_to_explain contains more than 1 element or signature contains multiple outputs. """ if outputs_to_explain: diff --git a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py index dd7f2b8d0a..7eb19386b4 100644 --- a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py +++ b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py @@ -49,8 +49,8 @@ def __init__( Any keyword arguments to be passed to tf.saved_model.save() function. Raises: - ValueError if outputs_to_explain contains more than 1 element. - ImportError if tf is not imported. + ValueError: If outputs_to_explain contains more than 1 element. + ImportError: If tf is not imported. """ if outputs_to_explain and len(outputs_to_explain) > 1: raise ValueError( @@ -91,7 +91,7 @@ def _infer_metadata_entries_from_model( Inferred input metadata and output metadata from the model. Raises: - ValueError if specified name is not found in signature outputs. + ValueError: If specified name is not found in signature outputs. """ loaded_sig = self._loaded_model.signatures[signature_name] diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index ae920ab2f3..1ad70faece 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -1049,7 +1049,7 @@ def __init__( staging_bucket set in aiplatform.init. 
Raises: - RuntimeError is not staging bucket was set using aiplatfrom.init and a staging + RuntimeError: If staging bucket was not set using aiplatform.init and a staging bucket was not passed in. """ @@ -1241,7 +1241,7 @@ def from_local_script( staging_bucket set in aiplatform.init. Raises: - RuntimeError is not staging bucket was set using aiplatfrom.init and a staging + RuntimeError: If staging bucket was not set using aiplatform.init and a staging bucket was not passed in. """ diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py index 919eff8619..6ba664916e 100644 --- a/google/cloud/aiplatform/metadata/metadata.py +++ b/google/cloud/aiplatform/metadata/metadata.py @@ -157,8 +157,8 @@ def log_metrics(self, metrics: Dict[str, Union[float, int]]): metrics (Dict): Required. Metrics key/value pairs. Only flot and int are supported format for value. Raises: - TypeError if value contains unsupported types. - ValueError if Experiment or Run is not set. + TypeError: If value contains unsupported types. + ValueError: If Experiment or Run is not set. """ self._validate_experiment_and_run(method_name="log_metrics") @@ -265,7 +265,7 @@ def _validate_metrics_value_type(metrics: Dict[str, Union[float, int]]): metrics (Dict): Required. Metrics key/value pairs. Only flot and int are supported format for value. Raises: - TypeError if value contains unsupported types. + TypeError: If value contains unsupported types. """ for key, value in metrics.items(): diff --git a/google/cloud/aiplatform/metadata/resource.py b/google/cloud/aiplatform/metadata/resource.py index 3ebcaa5112..2727513234 100644 --- a/google/cloud/aiplatform/metadata/resource.py +++ b/google/cloud/aiplatform/metadata/resource.py @@ -451,7 +451,7 @@ def _extract_metadata_store_id(resource_name, resource_noun) -> str: metadata_store_id (str): The metadata store id for the particular resource name. Raises: - ValueError if it does not exist. + ValueError: If it does not exist. """ pattern = re.compile( r"^projects\/(?P[\w-]+)\/locations\/(?P[\w-]+)\/metadataStores\/(?P[\w-]+)\/" diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 17ddc5c70d..6aca4f8c27 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -786,7 +786,7 @@ def _deploy( will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. Raises: - ValueError if there is not current traffic split and traffic percentage + ValueError: If there is not current traffic split and traffic percentage is not 0 or 100. """ _LOGGER.log_action_start_against_resource( @@ -2366,9 +2366,9 @@ def export_model( Details of the completed export with output destination paths to the artifacts or container image. Raises: - ValueError if model does not support exporting. + ValueError: If model does not support exporting. - ValueError if invalid arguments or export formats are provided. + ValueError: If invalid arguments or export formats are provided. """ # Model does not support exporting diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 4afd4920db..aefcaa9dbc 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -4060,7 +4060,7 @@ def run( produce a Vertex AI Model. Raises: - RuntimeError if Training job has already been run or is waiting to run. 
+ RuntimeError: If Training job has already been run or is waiting to run. """ if model_display_name: @@ -4269,7 +4269,7 @@ def _run_with_experiments( produce a Vertex AI Model. Raises: - RuntimeError if Training job has already been run or is waiting to run. + RuntimeError: If Training job has already been run or is waiting to run. """ if additional_experiments: diff --git a/google/cloud/aiplatform/utils/featurestore_utils.py b/google/cloud/aiplatform/utils/featurestore_utils.py index c78a96d185..23f3e48aad 100644 --- a/google/cloud/aiplatform/utils/featurestore_utils.py +++ b/google/cloud/aiplatform/utils/featurestore_utils.py @@ -47,7 +47,7 @@ def validate_and_get_entity_type_resource_ids( Tuple[str, str] - featurestore ID and entity_type ID Raises: - ValueError if the provided entity_type_name is not in form of a fully-qualified + ValueError: If the provided entity_type_name is not in form of a fully-qualified entityType resource name nor an entity_type ID with featurestore_id passed. """ match = CompatFeaturestoreServiceClient.parse_entity_type_path( @@ -91,7 +91,7 @@ def validate_and_get_feature_resource_ids( Tuple[str, str, str] - featurestore ID, entity_type ID, and feature ID Raises: - ValueError if the provided feature_name is not in form of a fully-qualified + ValueError: If the provided feature_name is not in form of a fully-qualified feature resource name nor a feature ID with featurestore_id and entity_type_id passed. """ diff --git a/google/cloud/aiplatform/utils/tensorboard_utils.py b/google/cloud/aiplatform/utils/tensorboard_utils.py index d3cb1ef704..acc9aad1ea 100644 --- a/google/cloud/aiplatform/utils/tensorboard_utils.py +++ b/google/cloud/aiplatform/utils/tensorboard_utils.py @@ -33,7 +33,7 @@ def _parse_experiment_name(experiment_name: str) -> Dict[str, str]: Components of the experiment name. Raises: - ValueError if the experiment_name is invalid. + ValueError: If the experiment_name is invalid. """ matched = TensorboardServiceClient.parse_tensorboard_experiment_path( experiment_name From dc0a30a40250e454c0b528b5e70e54625632e467 Mon Sep 17 00:00:00 2001 From: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com> Date: Fri, 10 Dec 2021 12:31:46 -0600 Subject: [PATCH 08/12] chore: Update Vertex AI naming references in README (#904) * Docs: Updated docstrings with exception error classes * chore: Update Vertex AI naming references in README Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> --- README.rst | 22 +++++++++++----------- docs/README.rst | 18 +++++++++--------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/README.rst b/README.rst index ca95583841..733348cc46 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -Vertex SDK for Python +Vertex AI SDK for Python ================================================= |GA| |pypi| |versions| |unit-tests| |system-tests| |sample-tests| @@ -78,7 +78,7 @@ Windows Overview ~~~~~~~~ -This section provides a brief overview of the Vertex SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples. +This section provides a brief overview of the Vertex AI SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples. .. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/main/notebooks/community/sdk @@ -106,7 +106,7 @@ Initialize the SDK to store common configurations that you use with the SDK. 
# defaults to us-central1 location='us-central1', - # Googlge Cloud Stoage bucket in same region as location + # Google Cloud Storage bucket in same region as location # used to stage artifacts staging_bucket='gs://my_staging_bucket', @@ -166,7 +166,7 @@ Vertex AI supports a variety of dataset schemas. References to these schemas are Training ^^^^^^^^ -The Vertex SDK for Python allows you train Custom and AutoML Models. +The Vertex AI SDK for Python allows you train Custom and AutoML Models. You can train custom models using a custom Python script, custom Python package, or container. @@ -217,7 +217,7 @@ In the code block above `my_dataset` is managed dataset created in the `Dataset` AutoMLs ------- -The Vertex SDK for Python supports AutoML tabular, image, text, video, and forecasting. +The Vertex AI SDK for Python supports AutoML tabular, image, text, video, and forecasting. To train an AutoML tabular model: @@ -358,7 +358,7 @@ To delete an endpoint: Pipelines --------- -To create a Vertex Pipeline run and monitor until completion: +To create a Vertex AI Pipeline run and monitor until completion: .. code-block:: Python @@ -382,7 +382,7 @@ To create a Vertex Pipeline run and monitor until completion: pipeline_root=pipeline_root, ) - # Execute pipeline in Vertex and monitor until completion + # Execute pipeline in Vertex AI and monitor until completion pl.run( # Email address of service account to use for the pipeline run # You must have iam.serviceAccounts.actAs permission on the service account to use it @@ -393,7 +393,7 @@ To create a Vertex Pipeline run and monitor until completion: sync=True ) -To create a Vertex Pipeline without monitoring until completion, use `submit` instead of `run`: +To create a Vertex AI Pipeline without monitoring until completion, use `submit` instead of `run`: .. code-block:: Python @@ -417,7 +417,7 @@ To create a Vertex Pipeline without monitoring until completion, use `submit` in pipeline_root=pipeline_root, ) - # Submit the Pipeline to Vertex + # Submit the Pipeline to Vertex AI pl.submit( # Email address of service account to use for the pipeline run # You must have iam.serviceAccounts.actAs permission on the service account to use it @@ -467,7 +467,7 @@ To use Explanation Metadata in endpoint deployment and model upload: Cloud Profiler ---------------------------- -Cloud Profiler allows you to profile your remote Vertex AI Training jobs on demand and visualize the results in Vertex Tensorboard. +Cloud Profiler allows you to profile your remote Vertex AI Training jobs on demand and visualize the results in Vertex AI Tensorboard. To start using the profiler with TensorFlow, update your training script to include the following: @@ -477,7 +477,7 @@ To start using the profiler with TensorFlow, update your training script to incl ... cloud_profiler.init() -Next, run the job with with a Vertex TensorBoard instance. For full details on how to do this, visit https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview +Next, run the job with with a Vertex AI TensorBoard instance. For full details on how to do this, visit https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview Finally, visit your TensorBoard in your Google Cloud Console, navigate to the "Profile" tab, and click the `Capture Profile` button. This will allow users to capture profiling statistics for the running jobs. 
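A minimal end-to-end sketch of such a training script (assuming the profiler is exposed as ``google.cloud.aiplatform.training_utils.cloud_profiler``, matching the plugin paths touched by PATCH 10 below, and intended to run inside a Vertex AI custom training job launched with a Vertex AI TensorBoard instance attached):

.. code-block:: Python

    import tensorflow as tf

    from google.cloud.aiplatform.training_utils import cloud_profiler

    # Initialize the profiler before training starts so the "Capture Profile"
    # button in the TensorBoard Profile tab can reach this job.
    cloud_profiler.init()

    # Toy model and data, standing in for a real training loop.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
    model.compile(optimizer="adam", loss="mse")
    model.fit(tf.random.normal((128, 8)), tf.random.normal((128, 1)), epochs=2)
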
diff --git a/docs/README.rst b/docs/README.rst index 74b9eb2214..f1c894550c 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -1,4 +1,4 @@ -Vertex SDK for Python +Vertex AI SDK for Python ================================================= |GA| |pypi| |versions| @@ -71,7 +71,7 @@ Windows Overview ~~~~~~~~ -This section provides a brief overview of the Vertex SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples. +This section provides a brief overview of the Vertex AI SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples. .. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/ai-platform-samples/tree/master/ai-platform-unified/notebooks/unofficial/sdk @@ -99,7 +99,7 @@ Initialize the SDK to store common configurations that you use with the SDK. # defaults to us-central1 location='us-central1', - # Googlge Cloud Stoage bucket in same region as location + # Google Cloud Storage bucket in same region as location # used to stage artifacts staging_bucket='gs://my_staging_bucket', @@ -159,7 +159,7 @@ Vertex AI supports a variety of dataset schemas. References to these schemas are Training ^^^^^^^^ -The Vertex SDK for Python allows you train Custom and AutoML Models. +The Vertex AI SDK for Python allows you train Custom and AutoML Models. You can train custom models using a custom Python script, custom Python package, or container. @@ -210,7 +210,7 @@ In the code block above `my_dataset` is managed dataset created in the `Dataset` AutoMLs ------- -The Vertex SDK for Python supports AutoML tabular, image, text, video, and forecasting. +The Vertex AI SDK for Python supports AutoML tabular, image, text, video, and forecasting. To train an AutoML tabular model: @@ -351,7 +351,7 @@ To delete an endpoint: Pipelines --------- -To create a Vertex Pipeline run and monitor until completion: +To create a Vertex AI Pipeline run and monitor until completion: .. code-block:: Python @@ -375,7 +375,7 @@ To create a Vertex Pipeline run and monitor until completion: pipeline_root=pipeline_root, ) - # Execute pipeline in Vertex and monitor until completion + # Execute pipeline in Vertex AI and monitor until completion pl.run( # Email address of service account to use for the pipeline run # You must have iam.serviceAccounts.actAs permission on the service account to use it @@ -386,7 +386,7 @@ To create a Vertex Pipeline run and monitor until completion: sync=True ) -To create a Vertex Pipeline without monitoring until completion, use `submit` instead of `run`: +To create a Vertex AI Pipeline without monitoring until completion, use `submit` instead of `run`: .. code-block:: Python @@ -410,7 +410,7 @@ To create a Vertex Pipeline without monitoring until completion, use `submit` in pipeline_root=pipeline_root, ) - # Submit the Pipeline to Vertex + # Submit the Pipeline to Vertex AI pl.submit( # Email address of service account to use for the pipeline run # You must have iam.serviceAccounts.actAs permission on the service account to use it From 1d81783b2f914dd7606ee884ca31c1a594e5135f Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Fri, 10 Dec 2021 15:20:15 -0800 Subject: [PATCH 09/12] fix: add clarity to param model_name (#888) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-aiplatform/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea - [x] Ensure the tests and linter pass - [x] Code coverage does not decrease (if any source code was changed) - [x] Appropriate docs were updated (if necessary) Fixes # 🦕 --- .../deploy_model_with_automatic_resources_sample.py | 5 +++++ .../deploy_model_with_dedicated_resources_sample.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/samples/model-builder/deploy_model_with_automatic_resources_sample.py b/samples/model-builder/deploy_model_with_automatic_resources_sample.py index 27976ae652..9efe3967f9 100644 --- a/samples/model-builder/deploy_model_with_automatic_resources_sample.py +++ b/samples/model-builder/deploy_model_with_automatic_resources_sample.py @@ -31,6 +31,11 @@ def deploy_model_with_automatic_resources_sample( metadata: Optional[Sequence[Tuple[str, str]]] = (), sync: bool = True, ): + """ + model_name: A fully-qualified model resource name or model ID. + Example: "projects/123/locations/us-central1/models/456" or + "456" when project and location are initialized or passed. + """ aiplatform.init(project=project, location=location) diff --git a/samples/model-builder/deploy_model_with_dedicated_resources_sample.py b/samples/model-builder/deploy_model_with_dedicated_resources_sample.py index 093dfae805..a0a9e0ffa3 100644 --- a/samples/model-builder/deploy_model_with_dedicated_resources_sample.py +++ b/samples/model-builder/deploy_model_with_dedicated_resources_sample.py @@ -37,6 +37,11 @@ def deploy_model_with_dedicated_resources_sample( metadata: Optional[Sequence[Tuple[str, str]]] = (), sync: bool = True, ): + """ + model_name: A fully-qualified model resource name or model ID. + Example: "projects/123/locations/us-central1/models/456" or + "456" when project and location are initialized or passed. 
+ """ aiplatform.init(project=project, location=location) From 48c2bf1ea2fa42afea1b5d419527bfb8e49e0ac0 Mon Sep 17 00:00:00 2001 From: mkovalski Date: Tue, 14 Dec 2021 08:50:51 -0500 Subject: [PATCH 10/12] fix: Important the correct constants and use v1 for tensorboard experiments (#905) * fix: Important the correct constants and use v1 for tensorboard experiments * fix: fixing linting errors --- .../plugins/tensorflow/tensorboard_api.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py index 4da8381b4c..fcb881a75c 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py @@ -29,11 +29,10 @@ from google.api_core import exceptions from google.cloud import aiplatform from google.cloud import storage +from google.cloud.aiplatform.constants import base as constants from google.cloud.aiplatform.utils import TensorboardClientWithOverride from google.cloud.aiplatform.tensorboard import uploader_utils -from google.cloud.aiplatform.compat.types import ( - tensorboard_experiment_v1beta1 as tensorboard_experiment, -) +from google.cloud.aiplatform.compat.types import tensorboard_experiment from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader from google.cloud.aiplatform import training_utils @@ -42,9 +41,8 @@ def _get_api_client() -> TensorboardClientWithOverride: """Creates an Tensorboard API client.""" - aiplatform.constants.API_BASE_PATH = ( - training_utils.environment_variables.tensorboard_api_uri - ) + constants.API_BASE_PATH = training_utils.environment_variables.tensorboard_api_uri + m = re.match( "projects/.*/locations/(.*)/tensorboards/.*", training_utils.environment_variables.tensorboard_resource_name, From 45c4086dd07dd7d3d3b7417196ff61a7107d8a1a Mon Sep 17 00:00:00 2001 From: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> Date: Tue, 14 Dec 2021 16:09:26 -0500 Subject: [PATCH 11/12] fix: add support for API base path overriding (#908) --- google/cloud/aiplatform/initializer.py | 29 +++++++++++-------- .../plugins/tensorflow/tensorboard_api.py | 7 ++--- tests/unit/aiplatform/test_initializer.py | 9 ++++++ 3 files changed, 29 insertions(+), 16 deletions(-) diff --git a/google/cloud/aiplatform/initializer.py b/google/cloud/aiplatform/initializer.py index 00f6b19b40..2aa98b1600 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -194,18 +194,20 @@ def encryption_spec_key_name(self) -> Optional[str]: return self._encryption_spec_key_name def get_client_options( - self, location_override: Optional[str] = None, prediction_client: bool = False + self, + location_override: Optional[str] = None, + prediction_client: bool = False, + api_base_path_override: Optional[str] = None, ) -> client_options.ClientOptions: """Creates GAPIC client_options using location and type. Args: location_override (str): - Set this parameter to get client options for a location different from - location set by initializer. Must be a GCP region supported by AI - Platform (Unified). - prediction_client (str): Optional flag to use a prediction endpoint. - - + Optional. Set this parameter to get client options for a location different + from location set by initializer. 
Must be a GCP region supported by + Vertex AI. + prediction_client (str): Optional. flag to use a prediction endpoint. + api_base_path_override (str): Optional. Override default API base path. Returns: clients_options (google.api_core.client_options.ClientOptions): A ClientOptions object set with regionalized API endpoint, i.e. @@ -222,7 +224,7 @@ def get_client_options( utils.validate_region(region) - service_base_path = ( + service_base_path = api_base_path_override or ( constants.PREDICTION_API_BASE_PATH if prediction_client else constants.API_BASE_PATH @@ -261,17 +263,19 @@ def create_client( credentials: Optional[auth_credentials.Credentials] = None, location_override: Optional[str] = None, prediction_client: bool = False, + api_base_path_override: Optional[str] = None, ) -> utils.VertexAiServiceClientWithOverride: """Instantiates a given VertexAiServiceClient with optional overrides. Args: client_class (utils.VertexAiServiceClientWithOverride): - (Required) A Vertex AI Service Client with optional overrides. + Required. A Vertex AI Service Client with optional overrides. credentials (auth_credentials.Credentials): - Custom auth credentials. If not provided will use the current config. - location_override (str): Optional location override. - prediction_client (str): Optional flag to use a prediction endpoint. + Optional. Custom auth credentials. If not provided will use the current config. + location_override (str): Optional. location override. + prediction_client (str): Optional. flag to use a prediction endpoint. + api_base_path_override (str): Optional. Override default api base path. Returns: client: Instantiated Vertex AI Service client with optional overrides """ @@ -288,6 +292,7 @@ def create_client( "client_options": self.get_client_options( location_override=location_override, prediction_client=prediction_client, + api_base_path_override=api_base_path_override, ), "client_info": client_info, } diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py index fcb881a75c..79b3285a1a 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py @@ -29,7 +29,6 @@ from google.api_core import exceptions from google.cloud import aiplatform from google.cloud import storage -from google.cloud.aiplatform.constants import base as constants from google.cloud.aiplatform.utils import TensorboardClientWithOverride from google.cloud.aiplatform.tensorboard import uploader_utils from google.cloud.aiplatform.compat.types import tensorboard_experiment @@ -41,8 +40,6 @@ def _get_api_client() -> TensorboardClientWithOverride: """Creates an Tensorboard API client.""" - constants.API_BASE_PATH = training_utils.environment_variables.tensorboard_api_uri - m = re.match( "projects/.*/locations/(.*)/tensorboards/.*", training_utils.environment_variables.tensorboard_resource_name, @@ -50,7 +47,9 @@ def _get_api_client() -> TensorboardClientWithOverride: region = m[1] api_client = aiplatform.initializer.global_config.create_client( - client_class=TensorboardClientWithOverride, location_override=region, + client_class=TensorboardClientWithOverride, + location_override=region, + api_base_path_override=training_utils.environment_variables.tensorboard_api_uri, ) return api_client diff --git a/tests/unit/aiplatform/test_initializer.py 
b/tests/unit/aiplatform/test_initializer.py index f4043a5eba..e52dfef3aa 100644 --- a/tests/unit/aiplatform/test_initializer.py +++ b/tests/unit/aiplatform/test_initializer.py @@ -181,6 +181,15 @@ def test_get_client_options( == expected_endpoint ) + def test_get_client_options_with_api_override(self): + initializer.global_config.init(location="asia-east1") + + client_options = initializer.global_config.get_client_options( + api_base_path_override="override.googleapis.com" + ) + + assert client_options.api_endpoint == "asia-east1-override.googleapis.com" + class TestThreadPool: def teardown_method(self): From 0482d066db021a9699100d8bbc532cd980a63235 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 15 Dec 2021 13:38:00 -0500 Subject: [PATCH 12/12] chore: release 1.8.1 (#892) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- CHANGELOG.md | 19 +++++++++++++++++++ google/cloud/aiplatform/version.py | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de2f469c48..3e3632c5fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +### [1.8.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.8.0...v1.8.1) (2021-12-14) + + +### Bug Fixes + +* add clarity to param model_name ([#888](https://www.github.com/googleapis/python-aiplatform/issues/888)) ([1d81783](https://www.github.com/googleapis/python-aiplatform/commit/1d81783b2f914dd7606ee884ca31c1a594e5135f)) +* add clarity to parameters per user feedback ([#886](https://www.github.com/googleapis/python-aiplatform/issues/886)) ([37ee0a1](https://www.github.com/googleapis/python-aiplatform/commit/37ee0a1dc6e0105e19aca18f44995a352bfc40cb)) +* add param for multi-label per user's feedback ([#887](https://www.github.com/googleapis/python-aiplatform/issues/887)) ([fda942f](https://www.github.com/googleapis/python-aiplatform/commit/fda942ffbe009077b47f36aad1c29603a451e38b)) +* add support for API base path overriding ([#908](https://www.github.com/googleapis/python-aiplatform/issues/908)) ([45c4086](https://www.github.com/googleapis/python-aiplatform/commit/45c4086dd07dd7d3d3b7417196ff61a7107d8a1a)) +* Important the correct constants and use v1 for tensorboard experiments ([#905](https://www.github.com/googleapis/python-aiplatform/issues/905)) ([48c2bf1](https://www.github.com/googleapis/python-aiplatform/commit/48c2bf1ea2fa42afea1b5d419527bfb8e49e0ac0)) +* incorrect uri for IOD yaml ([#889](https://www.github.com/googleapis/python-aiplatform/issues/889)) ([e108ef8](https://www.github.com/googleapis/python-aiplatform/commit/e108ef8250c77c8a8edeccb6b601cbe0b0380c89)) +* Minor docstring and snippet fixes ([#873](https://www.github.com/googleapis/python-aiplatform/issues/873)) ([578e06d](https://www.github.com/googleapis/python-aiplatform/commit/578e06df481c3d60074a7b8e9365f8361b04e32b)) + + +### Documentation + +* Update references to containers and notebook samples. 
([#890](https://www.github.com/googleapis/python-aiplatform/issues/890)) ([67fa1f1](https://www.github.com/googleapis/python-aiplatform/commit/67fa1f179af66686339d797e5b368e96816ed1c5)) +* Updated docstrings with exception error classes ([#894](https://www.github.com/googleapis/python-aiplatform/issues/894)) ([f9aecd2](https://www.github.com/googleapis/python-aiplatform/commit/f9aecd22fe08a97e45187b4d11c755ac3b9dfadd)) + ## [1.8.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.7.1...v1.8.0) (2021-12-03) diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 9a9bd8f7c3..8ab162f940 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.8.0" +__version__ = "1.8.1"
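
As a closing illustration, the API base path override added in [PATCH 11/12] can be exercised exactly as the new unit test does; a minimal sketch, assuming only a location has been initialized:

.. code-block:: Python

    from google.cloud.aiplatform import initializer

    # Regional endpoint selection still applies when overriding.
    initializer.global_config.init(location="asia-east1")

    client_options = initializer.global_config.get_client_options(
        api_base_path_override="override.googleapis.com"
    )

    # Expected per the new unit test: "asia-east1-override.googleapis.com"
    print(client_options.api_endpoint)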